massive rework of mangacli: implement progress bars; rework renaming, moving, and tagging; implement a database for caching; add logging; include ended entries in the search, but remove truly done entries

2025-05-06 20:47:55 +02:00
parent 47a6fe9fd9
commit 8e3649afe1


@@ -1,44 +1,77 @@
import os
import re
import shutil
import subprocess
import time
import zipfile
import jaro
from src.data.komga import KomgaAPI
from komgapi import komgapi
from komgapi.schemas.Series import Series
from src.data.mangadex import MangadexAPI
from src.data.cache import ListCache
from src.data.Feeds.nyaasi import Nyaa
from src.data.Feeds.nyaasi import NyaaFeed
from komsuite_nyaapy import Torrent
from src.logic.download import Download
from komconfig import KomConfig
from komcache import KomCache
from src.logic.utils import (
detect_chapters,
rename,
tag_folder,
move,
rename_folder,
remove_empty_folders,
time_checker,
folder_similarity,
calculate_new_volumes,
safe_remove_directory,
)
from src.logic.db_schemas import (
KOMGRABBER_TABLE,
INSERT_KOMGRABBER,
SELECT_KOMGRABBER,
UPDATE_KOMGRABBER,
LASTCHECKED_KOMGRABBER,
GET_LASTCHECKED_KOMGRABBER,
)
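# The SQL strings imported above live in src.logic.db_schemas and are not part
# of this diff. Judging only from how they are called below, they plausibly
# look like the following sketch (table and column names are assumptions):
#
# KOMGRABBER_TABLE = (
#     "CREATE TABLE IF NOT EXISTS komgrabber (pk INTEGER PRIMARY KEY, "
#     "name TEXT, series_id TEXT UNIQUE, status TEXT, last_checked TEXT)"
# )
# SELECT_KOMGRABBER = "SELECT * FROM komgrabber WHERE series_id = ?"
# INSERT_KOMGRABBER = "INSERT INTO komgrabber (name, series_id, status) VALUES (?, ?, ?)"
# UPDATE_KOMGRABBER = "UPDATE komgrabber SET name = ?, status = ? WHERE series_id = ?"
# LASTCHECKED_KOMGRABBER = "UPDATE komgrabber SET last_checked = CURRENT_TIMESTAMP WHERE series_id = ?"
# GET_LASTCHECKED_KOMGRABBER = "SELECT last_checked FROM komgrabber WHERE series_id = ?"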
import loguru
import sys
from pathlib import Path
from alive_progress import alive_it
from typing import Any
config = KomConfig()
logs = loguru.logger
logs.remove()
logs.add("komgrabber.log", level="INFO")
log = loguru.logger
log.remove()
log.add("logs/application.log", level="INFO", rotation="15MB", retention="1 week")
log.add("logs/cli.log", rotation="15MB", retention="1 week") # type:ignore
# log.add(sys.stdout)
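# For interactive runs a console sink can be enabled next to the file sinks; a
# minimal sketch using loguru's stderr handler (the format string is illustrative):
# log.add(sys.stderr, level="INFO", format="{time:HH:mm:ss} | {level} | {message}")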
Komga = KomgaAPI()
md = MangadexAPI()
LINE_CLEAR = "\x1b[2K" # <-- ANSI sequence
failed_items = []
LINE_CLEAR = "\x1b[2K"
failed_items: list[str] = []
class utils:
def __init__(self) -> None:
self.dl = Download("/home/alexander/Downloads/torrents/Manga_test/")
class mangaCli:
def __init__(self, library_id: str = "") -> None:
self.dl = Download(config.komgrabber.download_location)
if os.path.exists(config.komgrabber.download_location):
for file in os.listdir(config.komgrabber.download_location):
try:
os.remove(f"{config.komgrabber.download_location}/{file}")
except IsADirectoryError:
shutil.rmtree(f"{config.komgrabber.download_location}/{file}")
else:
os.mkdir(config.komgrabber.download_location)
self.file = None
self.serie = ""
self.serie_id = ""
self.series_data: Series = None
self.series_data: Series
self.volumes = []
self.download_path = config.komgrabber.download_location
if "~" in self.download_path:
self.download_path = os.path.expanduser(self.download_path)
self.cache: KomCache | None = None
if config.komgrabber.use_cache:
self.cache = KomCache()
self.cache.create_table(KOMGRABBER_TABLE)
# self.allSeries = Komga.getAllSeries()
pass
@@ -49,6 +82,8 @@ class utils:
return True
else:
return False
else:
return False
def __epub_check(title: str) -> bool:
if title.endswith(".epub"):
@@ -56,44 +91,59 @@ class utils:
else:
return False
# check if download location is empty, if not, remove everything in it
if os.path.exists(self.download_path):
# force stop the download
if len(self.dl.api.get_downloads()) > 0:
self.dl.api.get_downloads()[0].remove(force=True)
time.sleep(5)
file: str
file = self.dl.get_file(feed_url)
if __chapter_check(file):
print(f"Skipping {file}, reason: no volume number, likely a chapter")
# print(f"Skipping {file}, reason: no volume number, likely a chapter")
return False
if __epub_check(file):
print(f"Skipping {file}, reason: epub file")
# print(f"Skipping {file}, reason: epub file")
return False
self.file = file
print(f"Filename: {file}")
# print(f"Filename: {file}")
file_move = False
if file.endswith(".cbz") or file.endswith(".cbr"):
new_folder = f"{self.download_path}{self.serie}"
new_folder = Path(self.download_path, self.serie)
os.makedirs(new_folder, exist_ok=True)
file_move = True
state = self.dl.add_torrent(feed_url.split("/")[-1])
if state is False:
print("Error adding torrent")
# print("Error adding torrent")
return False
gid = self.dl.api.get_downloads()[0].gid
# check if the download is complete usin the gid
# check if the download is complete using the gid
dl_complete = True
check_done = False
while not self.dl.api.get_downloads(gids=[gid])[0].seeder:
# while not self.dl.api.get_downloads()[0].seeder:
progress = self.dl.check_progress()
progress = "{:.2f}".format(progress)
eta = self.dl.api.get_downloads()[0].eta_string()
print(end=LINE_CLEAR)
print("Progress: ", progress, "ETA: ", eta, end="\r")
# eta = self.dl.api.get_downloads()[0].eta_string() #
# print(end=LINE_CLEAR)
# print("Progress: ", progress, "ETA: ", eta, end="\r")
# if progress remains the same for 45 seconds, stop the download
progress = self.dl.check_progress()
time.sleep(30)
time.sleep(45)
n_progress = self.dl.check_progress()
dl_name = self.dl.api.get_downloads()[0].name
if not folder_similarity(self.serie.lower(), dl_name.lower()) > 0.8:
log.error(
f"Folder name {dl_name} does not match {self.serie}, skipping download"
)
self.dl.api.get_downloads()[0].remove(force=True)
dl_complete = False
break
if not check_done:
local_files = os.listdir(f"{self.download_path}")
for f in local_files:
@@ -129,20 +179,26 @@ class utils:
continue
vol = int(vol)
local_files_volumes.append(vol)
print(f"Grabbed volumes: {local_files_volumes}")
print(f"Komga volumes: {local_volumes}")
log.info(
"Grabbed volumes: {}, Komga volumes: {}".format(
sorted(local_files_volumes), local_volumes
)
)
if local_files_volumes == []:
pass
# check if any local_file_volumes are not in local_volumes
if all([vol in local_volumes for vol in local_files_volumes]):
print("all volumes downloaded, stopping...")
log.info("all volumes downloaded, stopping...")
dl_complete = False
break
else:
print("not all volumes downloaded, continuing...")
log.info("not all volumes downloaded, continuing...")
check_done = True
if progress == n_progress:
print("Progress has not changed for 30 seconds, stopping the download")
log.debug(
"Progress has not changed for 45 seconds, stopping the download"
)
self.dl.api.get_downloads()[0].remove(force=True)
dl_complete = False
break
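# The stall check above amounts to: sample progress twice, 45 seconds apart,
# and abort when nothing moved in between. The same idea as a standalone
# helper (a sketch, not part of this module):
#
# def is_stalled(sample_progress, interval: float = 45.0) -> bool:
#     """True when two progress samples taken `interval` seconds apart match."""
#     before = sample_progress()
#     time.sleep(interval)
#     return sample_progress() == before
#
# usage: is_stalled(self.dl.check_progress)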
@@ -154,26 +210,16 @@ class utils:
except:
pass
self.dl.remove_torrents()
print(end=LINE_CLEAR)
print("Download complete")
# print(end=LINE_CLEAR)
# print("Download complete")
# self.dl.download(feed_url, file_rename=True)
if not dl_complete:
# remove everything from the download folder
data = os.listdir(f"{self.download_path}")
for file in data:
try:
os.remove(f"{self.download_path}{file}")
except IsADirectoryError:
shutil.rmtree(f"{self.download_path}{file}")
return False
if dl_complete is True:
# for dfile in os.listdir(f'{self.download_path}{file}'):
# if __chapter_check(dfile):
# os.remove(f'{self.download_path}{file}{dfile}')
try:
if file_move is True:
shutil.move(
f"{self.download_path}{file}",
Path(self.download_path, file),
f"{new_folder}/{file}",
)
except Exception as e:
@@ -182,365 +228,324 @@ class utils:
return True
return False
def tag_files(self, folder: str, interactive: bool = False):
"""Tag all files in the specified folder.
Args:
----
- folder (str): the path to the folder containing the files to tag
- interactive (bool, optional): if set to True, comictagger pauses and awaits user input before writing tags to the file. Defaults to False.
"""
def is_valid_cbz(file_path) -> bool:
try:
with zipfile.ZipFile(file_path, "r") as cbz_file:
# Check if the file is a valid ZIP archive
if cbz_file.testzip() is not None:
return False
# Check if the CBZ file contains at least one image file
for file_info in cbz_file.infolist():
if (
not file_info.is_dir()
and file_info.filename.lower().endswith(
(".jpg", ".jpeg", ".png")
)
):
return True
return False
except (zipfile.BadZipFile, FileNotFoundError):
return False
for file in os.listdir(f"{folder}"):
print(f"Checking {file}")
# if file is a not cbz file, skip
if not file.endswith(".cbz"):
print(f"Skipping {file}")
continue
try:
# if not is_valid_cbz(f"{folder}/{file}"):
# print(f"removing {file}, not a valid cbz file")
# os.remove(f"{folder}/{file}")
# continue
print(f"Tagging {file}")
regex = r"v(\d{2,3}) #(\d{2,3})"
match = re.search(regex, file)
if not match:
print(f"Skipping {file}, no match")
os.remove(f"{folder}/{file}")
continue
if interactive:
subprocess.call(
f'comictagger -s -t cr -f -o "{folder}/{file}" --nosummary --overwrite -i',
shell=True,
)
subprocess.call(
f'comictagger -s -t cr -f -o "{folder}/{file}" --nosummary --overwrite',
shell=True,
)
print(f"Tagged {file}")
except Exception as e:
print(e)
continue
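# The tagging regex only accepts files already renamed to the
# "Title vNN #NN" convention produced by the renamer below, e.g.:
#
# >>> re.search(r"v(\d{2,3}) #(\d{2,3})", "Vagabond v14 #14.cbz").groups()
# ('14', '14')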
def rename_folder_and_files(self, file: str, komga_data, remove=False):
logs.info(f"Renaming {file}")
# rename the folder to the komga name
series_id = komga_data.id
series_name = komga_data.name
new_folder = f"{self.download_path}{series_name}"
try:
os.rename(f"{self.download_path}{file}", new_folder)
except Exception as e:
print(e)
try:
files = os.listdir(new_folder)
except NotADirectoryError:
return
volumes = []
for file in files:
if not (file.endswith(".cbz") or file.endswith(".cbr")):
print(f"Skipping {file}, not a comicarchive file")
continue
ext = file.split(".")[-1]
# match = re.search(r"v\d{2,4}(-\d{2,4})*", file)
match = re.search(r"v\d{2,4} ", file)
if match:
# print(match)
split_start = match.start()
split_end = match.end()
# Split the filename between split_start and split_end
volume = file[split_start:split_end]
# Split the filename at the split index, but keep the "v" and digits in the title
title = file[:split_start].strip()
# add the volume number to the title as a suffix #nr
title = f"{title} {volume} #{volume.replace('v', '')}".strip().replace(
" ", " "
)
# print(title)
# rename the file
os.rename(f"{new_folder}/{file}", f"{new_folder}/{title}.{ext}")
volumes.append(int(volume.replace("v", "")))
logs.info(f"Renamed {file} to {title}")
if remove:
print("removing files that are already in komga")
# search komga_name in series
# get all volumes of the serie
local_volumes = Komga.getVolumes(series_id=series_id)
# remove the volumes that are already in komga
self.remove_if_alr_in_db(local_volumes, volumes, series_name)
self.tag_files(new_folder)
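# End to end, rename_folder_and_files turns a release-style filename into the
# "Title vNN #NN" form the tagger expects; with the r"v\d{2,4} " pattern above,
# for example:
#
# "Vagabond v14 (Digital).cbz" -> "Vagabond v14 #14.cbz"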
def process_serie(self, data: Series):
"""Pprocess a single serie based on its title.
def process_serie(self, data: Series) -> list[Torrent]:
"""Process a single serie based on its title.
The process is as follows:
1. get all volumes of the serie from komga using the api
2. get all feed entries from nyaa.si using the api
3. compare the volumes from komga with the volumes from nyaa.si
3.1 if the volumes from nyaa.si are greater than the volumes from komga, add the entry to the download list.
4. if the volumes from nyaa.si are greater than the volumes from komga, add the entry to the download list.
Args:
----
- data (dict): a dict containing the title of the serie at ["title"] and the id of the serie at ["id"]
Returns:
-------
- list[dict]: a list of dictionaries containing the entries to download
"""
serie = data.name
series_id = data.id
vols = Komga.getVolumes(series_id=series_id, unpaged=True)
feed_titles = Nyaa.search(keyword=serie, category=3, subcategory=1)
print(feed_titles)
feed_titles = NyaaFeed().search(serie)
f_d = []
if feed_titles == []:
failed_items.append(serie)
added_max_vols = vols if vols else [0]
# #print(len(added_max_vols))
for entry in feed_titles:
if entry.seeders > 0:
if (
serie.lower() in entry.name.lower()
or jaro.jaro_metric(entry.name.lower(), serie.lower()) > 0.7
):
volumes = entry["volumes"]
if isinstance(volumes, list):
volumes = volumes[
::-1
] # reverse the list to get the highest volume number quickly
for vol in volumes:
if vol not in added_max_vols:
f_d.append(entry)
added_max_vols.append(vol)
break
# get the entry with the most volumes
filesizes = entry.filesizes
volumes = entry.volumes
min_size = len(volumes) * config.komgrabber.manga.min_filesize
if filesizes < min_size:
log.info(
f"Skipping {entry.name}, Reason: Filesize is too small"
)
continue
if max(volumes) > max(added_max_vols):
f_d.append(entry) # = entry
# added_max_vols = volumes
else:
continue
# return entry with the most volumes
return f_d
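# Feed titles are accepted either on an exact substring hit or on Jaro string
# similarity above 0.7; the acceptance rule above, isolated as a sketch:
#
# def titles_match(feed_title: str, serie: str, floor: float = 0.7) -> bool:
#     feed_title, serie = feed_title.lower(), serie.lower()
#     return serie in feed_title or jaro.jaro_metric(feed_title, serie) > floor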
def media_grabber(self, serie: Series):
def media_grabber(self, serie: Series, bar: Any = None) -> bool:
result = self.process_serie(serie)
logs.info(f"Found {len(result)} new volumes for {serie.name}")
logs.info(f"Data: {result}")
print(
f"current volumes: {Komga.getVolumes(series_id=serie.id, unpaged=True)}, new volumes: {result}"
)
# print(result)
if len(result) != 0:
for entry in result:
# print(entry["link"])
if self.download(entry["link"]) is True:
print("renaming...")
self.rename_folder_and_files(
self.file, komga_data=serie, remove=True
)
# self.move_to_komga(serie=entry)
print("done")
return True
else:
# remove the folder
try:
folders = os.listdir(self.download_path)
for folder in folders:
os.remove(f"{self.download_path}{folder}")
except Exception as e:
print(e)
total_new_volumes: list[tuple[Torrent, list[int]]] = []
fs_per_volume = config.komgrabber.manga.min_filesize
series_volumes = Komga.getVolumes(series_id=serie.id, unpaged=True)
if result is None or result == []:
log.info(f"Could not find any new volumes for {serie.name}")
return False
def remove_if_alr_in_db(
self, present_volumes: list, downloaded_volumes: list, folder: str
):
"""Delete any file from the folder that is already in the database, or does not conform to the naming convention.
Args:
----
present_volumes (list): a list of volumes that are already in the database, retrieved from komga api
downloaded_volumes (list): the list of volumes that are downloaded from the corresponding feed/api
folder (str): relative path to the folder containing the downloaded files
"""
print(f"present_volumes: {present_volumes}")
print(f"downloaded_volumes: {downloaded_volumes}")
content_folder = f"{self.download_path}{folder}"
content_files = [file for file in os.listdir(content_folder)]
print(f"content_files: {content_files}")
duplicates = [any(file in content_files for file in present_volumes)]
for file in os.listdir(content_folder):
if "#" not in file:
try:
os.remove(os.path.join(content_folder, file))
except IsADirectoryError:
shutil.rmtree(os.path.join(content_folder, file))
except FileNotFoundError:
continue
except Exception as e:
# print(e)
bar.text(f"Downloading new volumes for {serie.name}...")
for res in list(result):  # iterate a copy; result is mutated below
log.info(f"{res.name}, Volumes: {res.volumes}")
if res.volumes != [0]:
min_size = len(res.volumes) * fs_per_volume
if res.filesizes < min_size:
log.info(f"Skipping {res.name}, Reason: Filesize is too small")
result.remove(res)
continue
# print(f"removed {file}, Reason: not a valid file")
content_files.remove(file)
for vol in present_volumes:
if vol < 10:
vol = f"0{vol}"
for file in content_files:
if str(vol) in file:
# print(f"removing {vol}")
try:
os.remove(os.path.join(content_folder, file))
except:
print(f"could not remove {vol}")
def move_to_komga(self, serie: tuple[str, str] = None):
komga_path = f"{config.komga.media_path}{self.serie}"
# print(f"komga_path: {komga_path}")
# print("moving to komga")
# move files to komga
for file in os.listdir(f"{self.download_path}{self.serie}"):
file_path = os.path.join(f"{self.download_path}{self.serie}", file)
final_path = os.path.join(komga_path, file)
for res in result:
log.debug("present: {}, new: {}".format(series_volumes, res.volumes))
shutil.move(file_path, final_path)
new_volumes = calculate_new_volumes(series_volumes, res.volumes)
if len(new_volumes) == 0:
log.info(f"Skipping {res.name}, Reason: No new Volumes found")
continue
total_new_volumes.append((res, new_volumes))
if len(total_new_volumes) == 0:
log.info(f"Could not find any new volumes for {serie.name}")
return False
total_new_volumes = sorted(
total_new_volumes, key=lambda x: len(x[1]), reverse=True
)
res = total_new_volumes[0][0]
print(f"moved {file} to {komga_path}")
# delete empty folder
try:
os.rmdir(f"{self.download_path}{self.serie}")
logs.info(f"moved {self.serie} to komga")
except:
print(f"could not remove {self.serie}")
logs.error(f"could not remove {self.serie}")
return self
log.info(f"Found {len(total_new_volumes[0][1])} new entries for {serie.name}")
# log.info(
# f"Found {len(new_volumes)} new {'volume' if len(new_volumes) == 1 else 'volumes'} for {serie.name}"
# )
# # check if the new volumes were already downloaded
# log.info(f"current volumes: {series_volumes}, new volumes: {new_volumes}")
# # print(result)
if self.download(res.download_url) is True:
log.success(f"Downloaded {res.name}")
# self.rename_folder_and_files(self.file, komga_data=serie, remove=True)
# self.move_to_komga(serie=entry)
log.info("Renaming and tagging files")
rename()
if not config.komgrabber.get_chapters:
detect_chapters()
tag_folder()
if rename_folder(series=serie):
move(self.download_path, config.komga.media_path)
else:
log.info("Seems like we grabbed the wrong series, oops")
failed_items.append(serie.name)
# clear folder
# remove the download dir and create it anew
remove_empty_folders(self.download_path)
safe_remove_directory(self.download_path)
return True
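# calculate_new_volumes comes from src.logic.utils and is not shown in this
# diff; from its use above it yields the feed volumes Komga is missing. A
# hypothetical equivalent, purely for illustration:
#
# def calculate_new_volumes(present: list[int], offered: list[int]) -> list[int]:
#     return sorted(set(offered) - set(present))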
def search_for_new_volumes(self):
cache_present = False
if self.cache:
cache_present = True
series = Komga.series_controller.getAllSeries(
body={
"condition": {
"seriesStatus": {
"operator": "is",
"value": "HIATUS",
"value": "ENDED",
}
"anyOf": [
{"seriesStatus": {"operator": "is", "value": "ONGOING"}},
{"seriesStatus": {"operator": "is", "value": "HIATUS"}},
{"seriesStatus": {"operator": "is", "value": "ENDED"}},
]
}
}
)
shutil.rmtree(self.download_path)
komga_series: list[Series] = []
shutil.rmtree(self.download_path, ignore_errors=True)
os.mkdir(self.download_path)
for serie in series:
position = series.index(serie)
print("Working on serie", position, "of ", len(series))
logs.info(f"searching for new volumes for {serie.name}")
print(serie.name)
log.info(f"{len(series)} series found")
today = time.time()  # current time as epoch seconds, for cache-age comparison
def cache_ending(bar):
bar.title("Updating cache entries")
bar.text("Cache updated, continuing...")
def series_ending(bar):
bar.title("Completed searching for new volumes")
bar.text("All series checked, exiting...")
def skip_ending(bar):
bar.title("Skipping series")
bar.text("Skipped series, continuing...")
def ended_ending(bar):
bar.title("Skipping finished series")
bar.text("Finished check, continuing to search new volumes...")
if cache_present:
log.info("Cache present, checking for missing entries")
cacheBar = alive_it(
series,
finalize=cache_ending, # type:ignore
bar="smooth",
spinner="dots",
receipt_text=True,
)
for serie in cacheBar:
data = self.cache.query(SELECT_KOMGRABBER, (serie.id,))
log.debug(
f"Cache data: {data}, Serie: {serie.name}, Status: {serie.metadata.status}"
)
if data:
if data[3] == serie.metadata.status:
continue
elif data and data[3] != serie.metadata.status:
self.cache.update(
UPDATE_KOMGRABBER,
(serie.name, serie.metadata.status, serie.id),
)
log.info(f"Serie {serie.name} updated")
time.sleep(0.05)
else:
self.cache.insert(
INSERT_KOMGRABBER, (serie.name, serie.id, serie.metadata.status)
)
log.info(f"Serie {serie.name} added to cache")
log.debug("Cache created, added missing entries")
time.sleep(0.5)
if cache_present:
skipBar = alive_it(
series,
bar="smooth",
spinner="dots",
receipt_text=True,
finalize=skip_ending,
)
for serie in skipBar:
last_checked = self.cache.query(
GET_LASTCHECKED_KOMGRABBER, (serie.id,)
)[0]
# convert timestamp to epoch float for comparison
if last_checked:
last_checked = time.mktime(
time.strptime(last_checked, "%Y-%m-%d %H:%M:%S")
)
# if difference between last_checked and today is less than config.komgrabber.cache_check_interval, skip entry
time_difference = time_checker(last_checked, today)
# skip the entry when the time difference is below the configured interval, the series status is not ENDED, and the book count differs from the total book count
if time_difference < config.komgrabber.cache_check_interval:
komga_series.append(serie)
log.debug(f"Added {serie.name} to the list")
if (
serie.metadata.status == "ENDED"
and serie.booksCount == serie.metadata.totalBookCount
):
log.debug(
f"Serie {serie.name} if finished and has all volumes present, skipping..."
)
else:
komga_series.append(serie)
time.sleep(0.005)
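# The skip pass above keys on how long ago a series was last checked. Assuming
# time_checker simply returns the elapsed time between its two epoch
# arguments, the comparison reduces to a sketch like:
#
# def is_fresh(last_checked: float, now: float, interval: float) -> bool:
#     return (now - last_checked) < interval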
log.debug(len(komga_series))
log.info("Finished checking cache, continuing...")
log.info("There are {} series to check".format(len(komga_series)))
time.sleep(0.05)
pBar = alive_it(
komga_series,
finalize=series_ending,
title="Searching for new volumes",
)
for serie in pBar:
pBar.text(f"Searching for new volumes for {serie.name}")
log.info(
f"searching for new volumes for {serie.name}, currently at {serie.booksCount} volumes"
)
self.series_data = serie
self.serie = serie.name
self.serie_id = serie.id
if self.media_grabber(serie) is True:
self.move_to_komga(serie)
self.media_grabber(serie, bar=pBar)
if cache_present:
self.cache.update(LASTCHECKED_KOMGRABBER, (serie.id,))
time.sleep(5)
# print("done", serie.name)
return self
def add_missing_to_db(self):
database_series = ListCache("mangacache.db").get_all_series("name")
database_series = [serie[0] for serie in database_series]
database_set = set(database_series)
# print(database_series)
komga_series = Komga.series_controller.getAllSeries()
db_added = []
for serie in komga_series:
if serie.id not in database_set:
# print(serie.id)
db_added.append(serie)
ListCache("mangacache.db").add_series(
serie.id, serie.name, serie.metadata.status
)
else:
print(f"{serie.id} already in db")
print("added to db:", len(db_added))
# print(f"{serie[1]} has status {komga_series}")
@DeprecationWarning
def get_md_metadata(self, id: str):
data = md.get_metadata(id, lang="en")
db_data = ListCache("mangacache.db").get_series_by_id(id, "mangadex_id")
def automated(self, series_data: tuple[str, str]):
"""_summary_.
Args:
----
series_data (list[tuple[str,str]]): _description_
"""
if self.media_grabber(series_data) is True:
self.move_to_komga(series_data)
time.sleep(5)
def parallel_execution(series: list[tuple[str, str, str]]):
"""_summary_.
Args:
----
series (list[tuple[str,str,str]]): _description_
"""
th = utils()
for serie in series:
th.automated(serie)
@DeprecationWarning
def update_state():
database_series = ListCache("mangacache.db").get_all_series()
database_series = [serie for serie in database_series if serie[3] != "ENDED"]
for serie in database_series:
komga_series = Komga.getSeriesStatus(serie)
if komga_series == "ONGOING":
continue
else:
ListCache("mangacache.db").update_database(
komga_id=serie[2], complete=komga_series
def search_for_series(self, series: list[str]):
cache_present = False
if self.cache:
cache_present = True
shutil.rmtree(self.download_path, ignore_errors=True)
os.mkdir(self.download_path)
series_request = []
for serie in series:
series_request.append(
{"title": {"operator": "is", "value": serie}},
)
request_body = {"condition": {"anyOf": series_request}}
series = Komga.series_controller.getAllSeries(body=request_body)
def series_ending(bar):
bar.title("Completed searching for new volumes")
bar.text("All series checked, exiting...")
pBar = alive_it(series, finalize=series_ending)
for serie in pBar:
pBar.text(f"Searching for new volumes for {serie.name}")
log.info(
f"searching for new volumes for {serie.name}, currently at {serie.booksCount} volumes"
)
self.series_data = serie
self.serie = serie.name
self.serie_id = serie.id
self.media_grabber(serie)
if cache_present:
self.cache.update(LASTCHECKED_KOMGRABBER, (serie.id,))
time.sleep(5)
# print("done", serie.name)
return self
pass
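# Komga's getAllSeries accepts a JSON condition tree; the anyOf list built in
# search_for_series expands to something like this for two titles
# (values are illustrative):
#
# {"condition": {"anyOf": [
#     {"title": {"operator": "is", "value": "Berserk"}},
#     {"title": {"operator": "is", "value": "Vagabond"}},
# ]}}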
class ebookCli:
def __init__(self) -> None:
self.dl = Download(config.komgrabber.download_location)
self.file = None
self.serie = ""
self.serie_id = ""
self.series_data: Series
self.volumes = []
self.download_path = config.komgrabber.download_location
# self.allSeries = Komga.getAllSeries()
pass
def search_for_new_volumes(self):
folder = config.komgrabber.ebook.data_directory
series = os.listdir(folder)
def avail_check():
komga_avail = True
return (True, komga_avail)
def main():
utils().search_for_new_volumes()
def search_all():
mangaCli().search_for_new_volumes()
komga = komgapi(config.komga.user, config.komga.password, config.komga.url)
libraries = komga.library_controller.getLibraries()
for library in libraries:
komga.library_controller.scanLibrary(library.id)
print(f"Initialized scan for library {library.name}")
# update_state()
print("Failed series:\n", failed_items)
def search_series(series: list[str]):
mangaCli().search_for_series(series)
# update_state()
print("Failed series:\n", failed_items)
if __name__ == "__main__":
utils().search_for_new_volumes()
search_all()