#!/usr/bin/env python3
"""
A script for updating mods in a minecraft modpack.
"""
import os
import re
import sqlite3
from datetime import datetime

import bs4
import requests

import config_master

_con = sqlite3.connect("modpack.db", detect_types=sqlite3.PARSE_DECLTYPES)
_cur = _con.cursor()


class Mod:
    """Represents a mod."""
    title = ""
    url = ""
    release_phase = ""
    dl_link = ""
    filename = ""
    upload_date = ""

    def __init__(self, data=()):
        if data:
            self.from_tup(data)

    def from_tup(self, data):
        """
        Updates the mod's information with that retrieved from the database.
        """
        self.title = data[0]
        self.url = data[1]
        self.release_phase = data[2]
        self.filename = data[3]
        self.upload_date = data[4]

    def save(self):
        """Inserts the mod into the `mod` table of the database."""
        _cur.execute(
            "INSERT OR REPLACE INTO mod VALUES (?, ?, ?, ?, ?)",
            (
                self.title,
                self.url,
                self.release_phase,
                self.filename,
                self.upload_date
            ),
        )
        _con.commit()


def init_db():
    """
    Initializes the database, creating the tables if they don't exist yet.
    """
    try:
        _cur.execute("SELECT * FROM `mod`").fetchone()
        _cur.execute("SELECT * FROM `change_item`").fetchone()
    except sqlite3.OperationalError:
        _cur.execute(
            "CREATE TABLE mod("
            "title TEXT PRIMARY KEY,"
            "url TEXT,"
            "release_phase TEXT,"
            "filename TEXT,"
            "upload_date TIMESTAMP"
            ")"
        )
        _cur.execute(
            "CREATE TABLE change_item("
            "date TIMESTAMP DEFAULT CURRENT_TIMESTAMP,"
            "text TEXT"
            ")"
        )
        _con.commit()


def scrape_curse_forge(url, phase=None):
    """
    Scrapes all relevant info from the provided mod page and returns a Mod
    object, or False if no suitable file is found.
    """
    print("Scraping:", url)
    res = requests.get(url, verify=True)
    res.raise_for_status()
    soup = bs4.BeautifulSoup(res.text, "html.parser")
    mod = Mod()

    # Locate the "Recent Files" sidebar and the file list for the
    # Minecraft version configured in config_master.
    bar = soup.find("h3", text="Recent Files").parent.parent
    bar = bar.find("div", class_="cf-sidebar-inner")
    hs = bar.find_all("h4")
    h = [h for h in hs if h.text.strip() == f"Minecraft {config_master.mc_ver}"]
    if not h:
        print("No valid files for this version found.")
        return False
    h = h[0]
    files = bar.contents[bar.contents.index(h) + 2].find_all("li")

    # Pick the newest file matching the requested release phase, or the
    # newest file overall if no phase was given.
    if phase:
        for li in files:
            if li.div.div.get("title") == phase:
                break
        else:
            print("No valid files for this release phase found.")
            return False
    else:
        li = files[0]

    mod.title = re.search(r"-(.*?)- Mods", soup.title.text).group(1).strip()
    mod.url = url
    mod.release_phase = li.div.div.get("title")
    mod.dl_link = "https://minecraft.curseforge.com"
    mod.dl_link += li.contents[3].a.get("href")
    mod.filename = li.contents[3].contents[3].a.get("data-name")
    mod.upload_date = datetime.utcfromtimestamp(int(li.abbr.get("data-epoch")))
    return mod


def download_mod(mod):
    """Downloads the mod into the `mods` directory."""
    print("Downloading:", mod.filename)
    res = requests.get(mod.dl_link)
    res.raise_for_status()

    # Prefer the filename from the Content-Disposition header, falling
    # back to the final segment of the download URL.
    fname = res.headers.get('content-disposition')
    if fname:
        mod.filename = re.search(r'filename="(.+)"', fname).group(1)
    else:
        mod.filename = res.url.split('/')[-1]

    with open(os.path.join("mods", mod.filename), 'wb') as file:
        for chunk in res.iter_content(100000):
            file.write(chunk)


def get_mod(target):
    """Gets the specified mod in the modpack."""
    data = _cur.execute(
        "SELECT * FROM mod WHERE url = ?", (target,)
    ).fetchone()
    mod = Mod(data)
    return mod


def get_all_mods():
    """Retrieves all mods from the database."""
    data = _cur.execute("SELECT * FROM mod ORDER BY title ASC").fetchall()
    mods = []
    for line in data:
        mods.append(Mod(line))
    return mods


def log_change(text):
    """Logs the provided message as a change under today's date."""
    _cur.execute(
        "INSERT INTO change_item(text) VALUES(?)", (text,)
    )
    _con.commit()


def get_all_changes():
    """Retrieves all change log items, grouped by date."""
    data = _cur.execute(
        "SELECT date(date), text FROM change_item ORDER BY date DESC"
    ).fetchall()
    dates = [tup[0] for tup in data]
    data = {date: [tup[1] for tup in data if tup[0] == date] for date in dates}
    return data


def add_mod(target, phase=None):
    """Adds the specified mod to the modpack."""
    mod = scrape_curse_forge(target, phase)
    if not mod:
        # No matching file was found for this version/phase.
        return
    download_mod(mod)
    mod.save()
    log_change(f"Added {mod.title}")


def add_mod_all(target):
    """Reads a list of URLs from a text file and adds them to the modpack."""
    with open(target, 'r') as file:
        urls = file.read().splitlines()
    for url in urls:
        mod = scrape_curse_forge(url)
        if not mod:
            continue
        download_mod(mod)
        mod.save()
    log_change(f"Added mods in bulk from {os.path.basename(target)}")


def update_mod(target, delete=False, phase=None):
    """Updates the specified mod."""
    mod_current = get_mod(target)
    if not phase:
        phase = mod_current.release_phase
    mod_latest = scrape_curse_forge(mod_current.url, phase)
    if not mod_latest:
        return
    if mod_latest.upload_date > mod_current.upload_date:
        print(f"Updating {mod_latest.title} to {mod_latest.filename}")
        download_mod(mod_latest)
        mod_latest.save()
        log_change(f"Updated {mod_latest.title} to {mod_latest.filename}")
        # Either archive the old file in `mods_old` or delete it outright.
        if not delete:
            os.makedirs("mods_old", exist_ok=True)
            os.rename(
                os.path.join("mods", mod_current.filename),
                os.path.join("mods_old", mod_current.filename)
            )
        else:
            os.remove(os.path.join("mods", mod_current.filename))


def update_mod_all(delete=False):
    """Updates all mods in the pack."""
    for mod in get_all_mods():
        update_mod(mod.url, delete)


def generate_summary():
    """Generates the summary HTML file."""
    mods = get_all_mods()
    changes = get_all_changes()
    with open("template.html", 'r') as file:
        html = file.read()
    change_log = ""
    for date, items in changes.items():
        change_log += f"{date}