diff --git a/nyaapy/nyaa.py b/nyaapy/anime_site.py
similarity index 68%
rename from nyaapy/nyaa.py
rename to nyaapy/anime_site.py
index 6735e87..e94acbd 100644
--- a/nyaapy/nyaa.py
+++ b/nyaapy/anime_site.py
@@ -1,27 +1,26 @@
 import requests
-from nyaapy import utils
 from nyaapy import torrent
+from nyaapy.parser import parse_nyaa, parse_single, parse_nyaa_rss
 
 
+class AnimeTorrentSite:
+    SITE = torrent.TorrentSite.NYAASI
+    URL = "https://nyaa.si"
-class Nyaa:
-
-    def __init__(self):
-        self.SITE = utils.TorrentSite.NYAASI
-        self.URL = "https://nyaa.si"
-
-    def last_uploads(self, number_of_results):
+    @classmethod
+    def last_uploads(self, number_of_results: int):
         r = requests.get(self.URL)
 
         # If anything up with nyaa servers let the user know.
         r.raise_for_status()
 
-        json_data = utils.parse_nyaa(
-            request_text=r.text, limit=number_of_results + 1, site=self.SITE
+        json_data = parse_nyaa(
+            request_text=r.text, limit=number_of_results, site=self.SITE
         )
 
         return torrent.json_to_class(json_data)
 
-    def search(self, keyword, **kwargs):
+    @classmethod
+    def search(self, keyword: str, **kwargs):
         base_url = self.URL
 
         user = kwargs.get("user", None)
@@ -67,28 +66,30 @@ class Nyaa:
         http_response.raise_for_status()
 
         if user:
-            json_data = utils.parse_nyaa(
-                request_text=http_response.text, limit=None, site=self.SITE
+            json_data = parse_nyaa(
+                request_text=http_response.content, limit=None, site=self.SITE
             )
         else:
-            json_data = utils.parse_nyaa_rss(
-                request_text=http_response.text, limit=None, site=self.SITE
+            json_data = parse_nyaa_rss(
+                request_text=http_response.content, limit=None, site=self.SITE
            )
 
         # Convert JSON data to a class object
         return torrent.json_to_class(json_data)
 
-    def get(self, view_id):
+    @classmethod
+    def get(self, view_id: int):
         r = requests.get(f"{self.URL}/view/{view_id}")
         r.raise_for_status()
 
-        json_data = utils.parse_single(request_text=r.text, site=self.SITE)
+        json_data = parse_single(request_text=r.content, site=self.SITE)
 
         return torrent.json_to_class(json_data)
 
-    def get_user(self, username):
+    @classmethod
+    def get_from_user(self, username):
         r = requests.get(f"{self.URL}/user/{username}")
         r.raise_for_status()
 
-        json_data = utils.parse_nyaa(request_text=r.text, limit=None, site=self.SITE)
+        json_data = parse_nyaa(request_text=r.content, limit=None, site=self.SITE)
 
         return torrent.json_to_class(json_data)
diff --git a/nyaapy/magnet.py b/nyaapy/magnet.py
new file mode 100644
index 0000000..f89849b
--- /dev/null
+++ b/nyaapy/magnet.py
@@ -0,0 +1,22 @@
+import urllib
+from urllib.parse import urlencode
+
+def magnet_builder(info_hash, title):
+    """
+    Generates a magnet link using the info_hash and title of a given file.
+    """
+    known_trackers = [
+        "http://nyaa.tracker.wf:7777/announce",
+        "udp://open.stealth.si:80/announce",
+        "udp://tracker.opentrackr.org:1337/announce",
+        "udp://exodus.desync.com:6969/announce",
+        "udp://tracker.torrent.eu.org:451/announce",
+    ]
+
+    magnet_link = f"magnet:?xt=urn:btih:{info_hash}&" + urlencode(
+        {"dn": title}, quote_via=urllib.parse.quote
+    )
+    for tracker in known_trackers:
+        magnet_link += f"&{urlencode({'tr': tracker})}"
+
+    return magnet_link
\ No newline at end of file
diff --git a/nyaapy/nyaasi/__init__.py b/nyaapy/nyaasi/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/nyaapy/nyaasi/nyaa.py b/nyaapy/nyaasi/nyaa.py
new file mode 100644
index 0000000..76c24ec
--- /dev/null
+++ b/nyaapy/nyaasi/nyaa.py
@@ -0,0 +1,7 @@
+from nyaapy.anime_site import AnimeTorrentSite
+from nyaapy.torrent import TorrentSite
+
+
+class Nyaa(AnimeTorrentSite):
+    SITE = TorrentSite.NYAASI
+    URL = "https://nyaa.si"
\ No newline at end of file
diff --git a/nyaapy/nyaasi/sukebei.py b/nyaapy/nyaasi/sukebei.py
new file mode 100644
index 0000000..7f63b77
--- /dev/null
+++ b/nyaapy/nyaasi/sukebei.py
@@ -0,0 +1,6 @@
+from nyaapy.anime_site import AnimeTorrentSite
+from nyaapy.torrent import TorrentSite
+
+class SukebeiNyaa(AnimeTorrentSite):
+    SITE = TorrentSite.SUKEBEINYAASI
+    URL = "https://sukebei.nyaa.si"
\ No newline at end of file
diff --git a/nyaapy/utils.py b/nyaapy/parser.py
similarity index 74%
rename from nyaapy/utils.py
rename to nyaapy/parser.py
index 6aa944c..93ddb7f 100644
--- a/nyaapy/utils.py
+++ b/nyaapy/parser.py
@@ -1,323 +1,247 @@
-import urllib
-from enum import Enum
-from urllib.parse import urlencode
-
-from lxml import etree
-
-
-class TorrentSite(Enum):
-    """
-    Contains torrent sites
-    """
-
-    NYAASI = "https://nyaa.si"
-    SUKEBEINYAASI = "https://sukebei.nyaa.si"
-
-    # * nyaa.pantsu.cat redirects to nyaa.net
-    NYAANET = "https://nyaa.net"
-    SUKEBEINYAANET = "https://sukebei.nyaa.net"
-
-
-def nyaa_categories(b):
-    c = b.replace("?c=", "")
-    cats = c.split("_")
-
-    cat = cats[0]
-    sub_cat = cats[1]
-
-    categories = {
-        "1": {
-            "name": "Anime",
-            "sub_cats": {
-                "1": "Anime Music Video",
-                "2": "English-translated",
-                "3": "Non-English-translated",
-                "4": "Raw",
-            },
-        },
-        "2": {"name": "Audio", "sub_cats": {"1": "Lossless", "2": "Lossy"}},
-        "3": {
-            "name": "Literature",
-            "sub_cats": {
-                "1": "English-translated",
-                "2": "Non-English-translated",
-                "3": "Raw",
-            },
-        },
-        "4": {
-            "name": "Live Action",
-            "sub_cats": {
-                "1": "English-translated",
-                "2": "Idol/Promotional Video",
-                "3": "Non-English-translated",
-                "4": "Raw",
-            },
-        },
-        "5": {"name": "Pictures", "sub_cats": {"1": "Graphics", "2": "Photos"}},
-        "6": {"name": "Software", "sub_cats": {"1": "Applications", "2": "Games"}},
-    }
-
-    try:
-        category_name = (
-            f"{categories[cat]['name']} - {categories[cat]['sub_cats'][sub_cat]}"
-        )
-    except KeyError:
-        print("Unable to get Nyaa category name")
-        return
-
-    return category_name
-
-
-def parse_nyaa_rss(request_text, limit, site):
-    """
-    Extracts torrent information from a given rss response.
-    """
-    root = etree.fromstring(request_text)
-    torrents = []
-
-    for item in root.xpath("channel/item")[:limit]:
-        # Decide category.
-        if site in [TorrentSite.NYAASI, TorrentSite.NYAANET]:
-            category = item.findtext("nyaa:categoryId", namespaces=item.nsmap)
-        elif site in [TorrentSite.SUKEBEINYAASI, TorrentSite.SUKEBEINYAANET]:
-            category = item.findtext("nyaa:categoryId", namespaces=item.nsmap)
-        else:
-            raise ValueError("Unknown TorrentSite received!")
-
-        try:
-            is_remake = item.findtext("nyaa:remake", namespaces=item.nsmap) == "Yes"
-            is_trusted = item.findtext("nyaa:trusted", namespaces=item.nsmap) == "Yes"
-            item_type = (
-                "remake" if is_remake else "trusted" if is_trusted else "default"
-            )
-
-            torrent = {
-                "id": item.findtext("guid").split("/")[-1],
-                "category": category,
-                "url": item.findtext("guid"),
-                "name": item.findtext("title"),
-                "download_url": item.findtext("link"),
-                "magnet": magnet_builder(
-                    item.findtext("nyaa:infoHash", namespaces=item.nsmap),
-                    item.findtext("title"),
-                ),
-                "size": item.findtext("nyaa:size", namespaces=item.nsmap),
-                "date": item.findtext("pubDate"),
-                "seeders": item.findtext("nyaa:seeders", namespaces=item.nsmap),
-                "leechers": item.findtext("nyaa:leechers", namespaces=item.nsmap),
-                "completed_downloads": None,
-                "type": item_type,
-            }
-            torrents.append(torrent)
-        except IndexError:
-            pass
-
-    return torrents
-
-
-def parse_nyaa(request_text, limit, site):
-    parser = etree.HTMLParser()
-    tree = etree.fromstring(request_text, parser)
-
-    # Put proper domain here.
-    uri = site.value
-
-    torrents = []
-
-    # Going through table rows
-    for tr in tree.xpath("//tbody//tr")[:limit]:
-        block = []
-
-        for td in tr.xpath("./td"):
-            for link in td.xpath("./a"):
-
-                href = link.attrib.get("href").split("/")[-1]
-
-                # Only caring about non-comment pages.
-                if href[-9:] != "#comments":
-                    block.append(href)
-
-                if link.text and link.text.strip():
-                    block.append(link.text.strip())
-
-            if td.text is not None and td.text.strip():
-                block.append(td.text.strip())
-
-        # Add type of torrent based on tr class.
-        if tr.attrib.get("class") is not None:
-            if "danger" in tr.attrib.get("class"):
-                block.append("remake")
-            elif "success" in tr.attrib.get("class"):
-                block.append("trusted")
-            else:
-                block.append("default")
-        else:
-            block.append("default")
-
-        # Decide category.
-        if site in [TorrentSite.NYAASI, TorrentSite.NYAANET]:
-            category = nyaa_categories(block[0])
-        elif site in [TorrentSite.SUKEBEINYAASI, TorrentSite.SUKEBEINYAANET]:
-            category = sukebei_categories(block[0])
-        else:
-            raise ValueError("Unknown TorrentSite received!")
-
-        # Create torrent object
-        try:
-            torrent = {
-                "id": block[1],
-                "category": category,
-                "url": "{}/view/{}".format(uri, block[1]),
-                "name": block[2],
-                "download_url": "{}/download/{}".format(uri, block[3]),
-                "magnet": block[4],
-                "size": block[5],
-                "date": block[6],
-                "seeders": block[7],
-                "leechers": block[8],
-                "completed_downloads": block[9],
-                "type": block[10],
-            }
-            torrents.append(torrent)
-        except IndexError:
-            pass
-    return torrents
-
-
-def parse_single(request_text, site):
-    parser = etree.HTMLParser()
-    tree = etree.fromstring(request_text, parser)
-
-    # Put proper domain here.
-    uri = site.value
-
-    torrent = {}
-    data = []
-    torrent_files = []
-
-    # Find basic uploader info & torrent stats
-    for row in tree.xpath("//div[@class='row']"):
-        for div_text in row.xpath("./div[@class='col-md-5']//text()"):
-            d = div_text.strip()
-            if d:
-                data.append(d)
-
-    # Find files, we need only text of the li element(s).
-    # Sorry about Pycodestyle aka PEP8 (E501) error
-    for el in tree.xpath("//div[contains(@class, 'torrent-file-list')]//li/text()"):
-        if el.rstrip():
-            torrent_files.append(el)
-
-    torrent["title"] = tree.xpath("//h3[@class='panel-title']/text()")[0].strip()
-    torrent["category"] = data[0]
-    torrent["uploader"] = data[4]
-    torrent["uploader_profile"] = "{}/user/{}".format(uri, data[4])
-    torrent["website"] = data[6]
-    torrent["size"] = data[8]
-    torrent["date"] = data[3]
-    torrent["seeders"] = data[5]
-    torrent["leechers"] = data[7]
-    torrent["completed"] = data[9]
-    torrent["hash"] = data[10]
-    torrent["files"] = torrent_files
-
-    torrent["description"] = ""
-    for s in tree.xpath("//div[@id='torrent-description']"):
-        torrent["description"] += s.text
-
-    return torrent
-
-
-def sukebei_categories(b):
-    c = b.replace("?c=", "")
-    cats = c.split("_")
-
-    cat = cats[0]
-    subcat = cats[1]
-
-    categories = {
-        "1": {
-            "name": "Art",
-            "subcats": {
-                "1": "Anime",
-                "2": "Doujinshi",
-                "3": "Games",
-                "4": "Manga",
-                "5": "Pictures",
-            },
-        },
-        "2": {
-            "name": "Real Life",
-            "subcats": {"1": "Photobooks & Pictures", "2": "Videos"},
-        },
-    }
-
-    try:
-        category_name = (
-            f"{categories[cat]['name']} - {categories[cat]['subcats'][subcat]}"
-        )
-    except KeyError:
-        print("Unable to get Sukebei category name")
-        return
-
-    return category_name
-
-
-def magnet_builder(info_hash, title):
-    """
-    Generates a magnet link using the info_hash and title of a given file.
-    """
-    known_trackers = [
-        "http://nyaa.tracker.wf:7777/announce",
-        "udp://open.stealth.si:80/announce",
-        "udp://tracker.opentrackr.org:1337/announce",
-        "udp://exodus.desync.com:6969/announce",
-        "udp://tracker.torrent.eu.org:451/announce",
-    ]
-
-    magnet_link = f"magnet:?xt=urn:btih:{info_hash}&" + urlencode(
-        {"dn": title}, quote_via=urllib.parse.quote
-    )
-    for tracker in known_trackers:
-        magnet_link += f"&{urlencode({'tr': tracker})}"
-
-    return magnet_link
-
-
-# Pantsu Utils
-def query_builder(q, params):
-    available_params = [
-        "category",
-        "page",
-        "limit",
-        "userID",
-        "fromID",
-        "status",
-        "maxage",
-        "toDate",
-        "fromDate",
-        "dateType",
-        "minSize",
-        "maxSize",
-        "sizeType",
-        "sort",
-        "order",
-        "lang",
-    ]
-    query = "?q={}".format(q.replace(" ", "+"))
-
-    for param, value in params.items():
-        if param in available_params:
-            if param != "category" and param != "status" and param != "lang":
-                query += "&{}={}".format(param, value)
-            elif param == "category":
-                query += "&c={}_{}".format(value[0], value[1])
-
-            elif param == "status":
-                query += "&s={}".format(value)
-
-            elif param == "lang":
-                for lang in value:
-                    query += "&lang={}".format(lang)
-
-    return query
+from lxml import etree
+from nyaapy.magnet import magnet_builder
+from nyaapy.torrent import TorrentSite
+
+
+def nyaa_categories(b):
+    c = b.replace("?c=", "")
+    cats = c.split("_")
+
+    cat = cats[0]
+    sub_cat = cats[1]
+
+    categories = {
+        "1": {
+            "name": "Anime",
+            "sub_cats": {
+                "1": "Anime Music Video",
+                "2": "English-translated",
+                "3": "Non-English-translated",
+                "4": "Raw",
+            },
+        },
+        "2": {"name": "Audio", "sub_cats": {"1": "Lossless", "2": "Lossy"}},
+        "3": {
+            "name": "Literature",
+            "sub_cats": {
+                "1": "English-translated",
+                "2": "Non-English-translated",
+                "3": "Raw",
+            },
+        },
+        "4": {
+            "name": "Live Action",
+            "sub_cats": {
+                "1": "English-translated",
+                "2": "Idol/Promotional Video",
+                "3": "Non-English-translated",
+                "4": "Raw",
+            },
+        },
+        "5": {"name": "Pictures", "sub_cats": {"1": "Graphics", "2": "Photos"}},
"Software", "sub_cats": {"1": "Applications", "2": "Games"}}, + } + + try: + category_name = ( + f"{categories[cat]['name']} - {categories[cat]['sub_cats'][sub_cat]}" + ) + except KeyError: + print("Unable to get Nyaa category name") + return + + return category_name + + +def parse_nyaa_rss(request_text, limit, site): + """ + Extracts torrent information from a given rss response. + """ + root = etree.fromstring(request_text) + torrents = [] + + for item in root.xpath("channel/item")[:limit]: + # Decide category. + if site in [TorrentSite.NYAASI, TorrentSite.NYAALAND]: + category = item.findtext("nyaa:categoryId", namespaces=item.nsmap) + elif site in [TorrentSite.SUKEBEINYAASI, TorrentSite.SUKEBEINYAALAND]: + category = item.findtext("nyaa:categoryId", namespaces=item.nsmap) + else: + raise ValueError("Unknown TorrentSite received!") + + try: + is_remake = item.findtext("nyaa:remake", namespaces=item.nsmap) == "Yes" + is_trusted = item.findtext("nyaa:trusted", namespaces=item.nsmap) == "Yes" + item_type = ( + "remake" if is_remake else "trusted" if is_trusted else "default" + ) + + torrent = { + "id": item.findtext("guid").split("/")[-1], + "category": category, + "url": item.findtext("guid"), + "name": item.findtext("title"), + "download_url": item.findtext("link"), + "magnet": magnet_builder( + item.findtext("nyaa:infoHash", namespaces=item.nsmap), + item.findtext("title"), + ), + "size": item.findtext("nyaa:size", namespaces=item.nsmap), + "date": item.findtext("pubDate"), + "seeders": item.findtext("nyaa:seeders", namespaces=item.nsmap), + "leechers": item.findtext("nyaa:leechers", namespaces=item.nsmap), + "completed_downloads": None, + "type": item_type, + } + torrents.append(torrent) + except IndexError: + pass + + return torrents + + +def parse_nyaa(request_text, limit, site): + parser = etree.HTMLParser() + tree = etree.fromstring(request_text, parser) + + # Put proper domain here. + uri = site.value + + torrents = [] + + # Going through table rows + for tr in tree.xpath("//tbody//tr")[:limit]: + block = [] + + for td in tr.xpath("./td"): + for link in td.xpath("./a"): + + href = link.attrib.get("href").split("/")[-1] + + # Only caring about non-comment pages. + if href[-9:] != "#comments": + block.append(href) + + if link.text and link.text.strip(): + block.append(link.text.strip()) + + if td.text is not None and td.text.strip(): + block.append(td.text.strip()) + + # Add type of torrent based on tr class. + if tr.attrib.get("class") is not None: + if "danger" in tr.attrib.get("class"): + block.append("remake") + elif "success" in tr.attrib.get("class"): + block.append("trusted") + else: + block.append("default") + else: + block.append("default") + + # Decide category. 
+        if site in [TorrentSite.NYAASI, TorrentSite.NYAALAND]:
+            category = nyaa_categories(block[0])
+        elif site is TorrentSite.SUKEBEINYAASI:
+            category = sukebei_categories(block[0])
+        else:
+            raise ValueError("Unknown TorrentSite received!")
+
+        # Create torrent object
+        try:
+            torrent = {
+                "id": block[1],
+                "category": category,
+                "url": "{}/view/{}".format(uri, block[1]),
+                "name": block[2],
+                "download_url": "{}/download/{}".format(uri, block[3]),
+                "magnet": block[4],
+                "size": block[5],
+                "date": block[6],
+                "seeders": block[7],
+                "leechers": block[8],
+                "completed_downloads": block[9],
+                "type": block[10],
+            }
+            torrents.append(torrent)
+        except IndexError:
+            pass
+    return torrents
+
+
+def parse_single(request_text, site):
+    parser = etree.HTMLParser()
+    tree = etree.fromstring(request_text, parser)
+
+    # Put proper domain here.
+    uri = site.value
+
+    torrent = {}
+    data = []
+    torrent_files = []
+
+    # Find basic uploader info & torrent stats
+    for row in tree.xpath("//div[@class='row']"):
+        for div_text in row.xpath("./div[@class='col-md-5']//text()"):
+            d = div_text.strip()
+            if d:
+                data.append(d)
+
+    # Find files, we need only text of the li element(s).
+    # Sorry about Pycodestyle aka PEP8 (E501) error
+    for el in tree.xpath("//div[contains(@class, 'torrent-file-list')]//li/text()"):
+        if el.rstrip():
+            torrent_files.append(el)
+
+    torrent["title"] = tree.xpath("//h3[@class='panel-title']/text()")[0].strip()
+    torrent["category"] = data[0]
+    torrent["uploader"] = data[4]
+    torrent["uploader_profile"] = "{}/user/{}".format(uri, data[4])
+    torrent["website"] = data[6]
+    torrent["size"] = data[8]
+    torrent["date"] = data[3]
+    torrent["seeders"] = data[5]
+    torrent["leechers"] = data[7]
+    torrent["completed"] = data[9]
+    torrent["hash"] = data[10]
+    torrent["files"] = torrent_files
+
+    torrent["description"] = ""
+    for s in tree.xpath("//div[@id='torrent-description']"):
+        torrent["description"] += s.text
+
+    return torrent
+
+
+def sukebei_categories(b):
+    c = b.replace("?c=", "")
+    cats = c.split("_")
+
+    cat = cats[0]
+    subcat = cats[1]
+
+    categories = {
+        "1": {
+            "name": "Art",
+            "subcats": {
+                "1": "Anime",
+                "2": "Doujinshi",
+                "3": "Games",
+                "4": "Manga",
+                "5": "Pictures",
+            },
+        },
+        "2": {
+            "name": "Real Life",
+            "subcats": {"1": "Photobooks & Pictures", "2": "Videos"},
+        },
+    }
+
+    try:
+        category_name = (
+            f"{categories[cat]['name']} - {categories[cat]['subcats'][subcat]}"
+        )
+    except KeyError:
+        print("Unable to get Sukebei category name")
+        return
+
+    return category_name
diff --git a/nyaapy/sukebei.py b/nyaapy/sukebei.py
deleted file mode 100644
index 08a38b8..0000000
--- a/nyaapy/sukebei.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import requests
-from nyaapy import utils
-
-
-class SukebeiNyaa:
-
-    def __init__(self):
-        self.SITE = utils.TorrentSite.SUKEBEINYAASI
-
-    def search(self, keyword, **kwargs):
-        uri = self.SITE.value
-        category = kwargs.get("category", 0)
-        subcategory = kwargs.get("subcategory", 0)
-        filters = kwargs.get("filters", 0)
-        page = kwargs.get("page", 0)
-
-        if page > 0:
-            r = requests.get(
-                "{}/?f={}&c={}_{}&q={}&p={}".format(
-                    uri, filters, category, subcategory, keyword, page
-                )
-            )
-        else:
-            r = requests.get(
-                "{}/?f={}&c={}_{}&q={}".format(
-                    uri, filters, category, subcategory, keyword
-                )
-            )
-
-        r.raise_for_status()
-        return utils.parse_nyaa(r.text, limit=None, site=self.SITE)
-
-    def get(self, id):
-        r = requests.get("{}/view/{}".format(self.SITE.value, id))
-        r.raise_for_status()
-
-        return utils.parse_single(r.text, self.SITE)
-
-    def get_user(self, username):
-        r = requests.get("{}/user/{}".format(self.SITE.value, username))
-        r.raise_for_status()
-
-        return utils.parse_nyaa(r.text, limit=None, site=self.SITE)
-
-    def last_uploads(self, number_of_results):
-        r = requests.get(self.SITE.value)
-        r.raise_for_status()
-
-        return utils.parse_nyaa(r.text, limit=number_of_results + 1, site=self.SITE)
diff --git a/nyaapy/torrent.py b/nyaapy/torrent.py
index 8269c05..a5af209 100644
--- a/nyaapy/torrent.py
+++ b/nyaapy/torrent.py
@@ -1,3 +1,6 @@
+from enum import Enum
+
+
 def json_to_class(data):
     # We check if the data passed is a list or not
     if isinstance(data, list):
@@ -9,9 +12,18 @@ def json_to_class(data):
     else:
         return Torrent(data)
 
-
 # This deals with converting the dict to an object
 class Torrent(object):
     def __init__(self, my_dict):
         for key in my_dict:
             setattr(self, key, my_dict[key])
+
+
+class TorrentSite(Enum):
+    """
+    Contains torrent sites
+    """
+
+    NYAASI = "https://nyaa.si"
+    SUKEBEINYAASI = "https://sukebei.nyaa.si"
+
+    NYAALAND = "https://nyaa.land"
\ No newline at end of file
diff --git a/poetry.lock b/poetry.lock
index 600263d..d0ec109 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -599,4 +599,4 @@ zstd = ["zstandard (>=0.18.0)"]
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.10"
-content-hash = "bd09d4b9f6f3ae48c750ce8dbd1bacc8b75e265b4363f77bf095962f9d1ebeac"
+content-hash = "cb48b1a114a5cd1aa44d635f91c49ce640137746939a4feadb5615f8cfc7cf8b"
diff --git a/tests/integration/test_nyaasi.py b/tests/integration/test_nyaasi.py
new file mode 100644
index 0000000..e05b2ff
--- /dev/null
+++ b/tests/integration/test_nyaasi.py
@@ -0,0 +1,30 @@
+from nyaapy.nyaasi.nyaa import Nyaa
+from nyaapy.torrent import Torrent
+
+def test_nyaa_last_uploads():
+    request = Nyaa.last_uploads(number_of_results=10)
+    torrent = request[0]
+
+    assert isinstance(torrent, Torrent) == True
+    assert len(request) == 10
+
+
+def test_nyaa_search():
+    request = Nyaa.search(keyword="koe no katachi")
+    torrent = request[0]
+
+    assert isinstance(torrent, Torrent) == True
+
+
+def test_nyaa_get_single():
+    request = Nyaa.get(view_id='1847113')
+
+    assert isinstance(request, Torrent) == True
+
+
+def test_nyaa_get_from_user():
+    request = Nyaa.get_from_user(username="Erai-raws")
+    torrent = request[0]
+
+    assert isinstance(torrent, Torrent) == True
+    assert len(request) <= 75
\ No newline at end of file
diff --git a/tests/test.py b/tests/test.py
deleted file mode 100644
index b473619..0000000
--- a/tests/test.py
+++ /dev/null
@@ -1,74 +0,0 @@
-from nyaapy.nyaa import Nyaa
-from pprint import pprint
-from datetime import datetime
-import json
-import sys
-import os
-
-# Creating a folder for test_files
-# ! not included in github project.
-if not os.path.isdir("test_files"): - os.makedirs("test_files") - -nyaa = Nyaa() - -# Get fresh torrents -dt_latest_torrents_begin = datetime.now() -latest_torrents = nyaa.last_uploads(100) -dt_latest_torrents_end = datetime.now() -with open("test_files/nyaa_latest_torrent_test.json", "w") as f: - for torrent in latest_torrents: - try: - # This prints it as byte like objects since unicode is fun - f.write(str(torrent.name.encode("utf-8")) + "\n") - except AttributeError: - f.write("No name found for this torrent") - -# Search some nasty stuff -dt_search_begin = datetime.now() -test_search = nyaa.search("kimi no na wa") -dt_search_end = datetime.now() -with open("test_files/nyaa_search_test.json", "w") as f: - for torrent in test_search: - try: - # This prints it as byte like objects since unicode is fun - f.write(str(torrent.name.encode("utf-8")) + "\n") - except AttributeError: - f.write("No name found for this torrent") - -# Get first torrent from found torrents -dt_single_torrent_begin = datetime.now() -single_torrent = test_search[0] -dt_single_torrent_end = datetime.now() -with open("test_files/nyaa_single_torrent_test.json", "w") as f: - try: - # This prints it as byte like objects since unicode is fun - f.write(str(torrent.name.encode("utf-8")) + "\n") - except AttributeError: - f.write("No name found for this torrent") - -dt_user_begin = datetime.now() -user_torrents = nyaa.get_user("HorribleSubs") -dt_user_end = datetime.now() -with open("test_files/nyaa_single_user_test.json", "w") as f: - for torrent in user_torrents: - try: - # This prints it as byte like objects since unicode is fun - f.write(str(torrent.name.encode("utf-8")) + "\n") - except AttributeError: - f.write("No name found for this torrent") - -print( - "Latest torrents time:", - (dt_latest_torrents_end - dt_latest_torrents_begin).microseconds / 1000, - "msec", -) -print( - "Test search time:", (dt_search_end - dt_search_begin).microseconds / 1000, "msec" -) -print( - "Single torrent time:", - (dt_single_torrent_end - dt_single_torrent_begin).microseconds / 1000, - "msec", -) -print("Single user time:", (dt_user_end - dt_user_begin).microseconds / 1000, "msec") diff --git a/tests/test_pantsu.py b/tests/test_pantsu.py deleted file mode 100644 index fdaf4e8..0000000 --- a/tests/test_pantsu.py +++ /dev/null @@ -1,7 +0,0 @@ -""" -* Pantsu need some serious work -Regular data single_torrent parser not working from other Nyaa alternatives -Needs some work -""" - -print("TODO") diff --git a/tests/test_sukebei.py b/tests/test_sukebei.py deleted file mode 100644 index add9179..0000000 --- a/tests/test_sukebei.py +++ /dev/null @@ -1,53 +0,0 @@ -from nyaapy.sukebei import SukebeiNyaa -from datetime import datetime -import json -import os - -# Creating a folder for test_files -# ! not included in github project. 
-if not os.path.isdir("test_files"): - os.makedirs("test_files") - -nyaa = SukebeiNyaa() - -# Get fresh torrents -dt_latest_torrents_begin = datetime.now() -latest_torrents = nyaa.last_uploads(100) -dt_latest_torrents_end = datetime.now() -with open("test_files/sukebei_latest_torrent_test.json", "w") as f: - json.dump(latest_torrents, f) - -# Search some nasty stuff -dt_search_begin = datetime.now() -test_search = nyaa.search("G Senjou no maou") -dt_search_end = datetime.now() -with open("test_files/sukebei_search_test.json", "w") as f: - json.dump(test_search, f) - -# Get first torrent from found torrents -dt_single_torrent_begin = datetime.now() -single_torrent = nyaa.get(test_search[0]["id"]) -dt_single_torrent_end = datetime.now() -with open("test_files/sukebei_single_torrent_test.json", "w") as f: - json.dump(single_torrent, f) - -dt_user_begin = datetime.now() -user_torrents = nyaa.get_user("RUNBKK") -dt_user_end = datetime.now() -with open("test_files/sukebei_single_user_test.json", "w") as f: - json.dump(user_torrents, f) - -print( - "Latest torrents time:", - (dt_latest_torrents_end - dt_latest_torrents_begin).microseconds / 1000, - "msec", -) -print( - "Test search time:", (dt_search_end - dt_search_begin).microseconds / 1000, "msec" -) -print( - "Single torrent time:", - (dt_single_torrent_end - dt_single_torrent_begin).microseconds / 1000, - "msec", -) -print("Single user time:", (dt_user_end - dt_user_begin).microseconds / 1000, "msec")