Merge pull request #54 from JuanjoSalvador/master

Development version of 0.7.0
This commit is contained in:
Juanjo Salvador
2020-12-13 23:40:17 +01:00
committed by GitHub
12 changed files with 524 additions and 300 deletions

5
.gitignore vendored
View File

@@ -3,4 +3,7 @@ dist/
nyaapy.egg-info nyaapy.egg-info
.vscode .vscode
env/ env/
*.pyc *.pyc
test_files
venv
.idea

View File

@@ -4,8 +4,27 @@
1. Star the repo, it will help me a lot. 1. Star the repo, it will help me a lot.
2. Make a fork for you. 2. Make a fork for you.
3. Use the `dev` branch, never master. 3. Clone the repo into your local machine.
4. Create a new branch for your changes.
5. Start hacking :-)
## Not familiar with the Python workflow?
1. Be sure that you have Python 3 and virtualenv installed (if not, install them)
2. Create a new virtualenv
```
python -m virtualenv env -p python3
```
3. And activate it!
4. Now it's time to install the dependencies.
```
pip install -r requirements.txt
```
5. And now you're ready to hack.
## Hacking ## Hacking

View File

@@ -1,11 +1,7 @@
# Info about the module # Info about the module
__version__ = '0.6.0' __version__ = '0.6.3'
__author__ = 'Juanjo Salvador' __author__ = 'Juanjo Salvador'
__email__ = 'juanjosalvador@netc.eu' __email__ = 'juanjosalvador@netc.eu'
__url__ = 'http://juanjosalvador.me' __url__ = 'http://juanjosalvador.me'
__copyright__ = '2017 Juanjo Salvador' __copyright__ = '2017 Juanjo Salvador'
__license__ = 'MIT license' __license__ = 'MIT license'
from NyaaPy.nyaa import Nyaa
from NyaaPy.pantsu import Pantsu
from NyaaPy.sukebei import SukebeiNyaa, SukebeiPantsu

View File

@@ -1,21 +1,30 @@
import requests import requests
import urllib.parse
from bs4 import BeautifulSoup
from NyaaPy import utils from NyaaPy import utils
from NyaaPy import torrent
class Nyaa: class Nyaa:
def __init__(self): def __init__(self):
self.URI = "http://nyaa.si" self.SITE = utils.TorrentSite.NYAASI
self.URL = "https://nyaa.si"
def last_uploads(self, number_of_results): def last_uploads(self, number_of_results):
r = requests.get(self.URI) r = requests.get(self.URL)
soup = BeautifulSoup(r.text, 'html.parser')
rows = soup.select('table tr')
return utils.parse_nyaa(table_rows=rows, limit=number_of_results + 1) # If anything up with nyaa servers let the user know.
r.raise_for_status()
json_data = utils.parse_nyaa(
request_text=r.text,
limit=number_of_results + 1,
site=self.SITE
)
return torrent.json_to_class(json_data)
def search(self, keyword, **kwargs): def search(self, keyword, **kwargs):
url = self.URL
user = kwargs.get('user', None) user = kwargs.get('user', None)
category = kwargs.get('category', 0) category = kwargs.get('category', 0)
subcategory = kwargs.get('subcategory', 0) subcategory = kwargs.get('subcategory', 0)
@@ -23,32 +32,43 @@ class Nyaa:
page = kwargs.get('page', 0) page = kwargs.get('page', 0)
if user: if user:
user_uri = "user/{}".format(user) user_uri = f"user/{user}"
else: else:
user_uri = "" user_uri = ""
if page > 0: if page > 0:
r = requests.get("{}/{}?f={}&c={}_{}&q={}&p={}".format( r = requests.get("{}/{}?f={}&c={}_{}&q={}&p={}".format(
self.URI, user_uri, filters, category, subcategory, keyword, url, user_uri, filters, category, subcategory, keyword,
page)) page))
else: else:
r = requests.get("{}/{}?f={}&c={}_{}&q={}".format( r = requests.get("{}/{}?f={}&c={}_{}&q={}".format(
self.URI, user_uri, filters, category, subcategory, keyword)) url, user_uri, filters, category, subcategory, keyword))
soup = BeautifulSoup(r.text, 'html.parser') r.raise_for_status()
rows = soup.select('table tr')
return utils.parse_nyaa(rows, limit=None) json_data = utils.parse_nyaa(
request_text=r.text,
limit=None,
site=self.SITE
)
def get(self, id): return torrent.json_to_class(json_data)
r = requests.get("{}/view/{}".format(self.URI, id))
soup = BeautifulSoup(r.text, 'html.parser')
content = soup.findAll("div", {"class": "panel", "id": None})
return utils.parse_single(content) def get(self, view_id):
r = requests.get(f'{self.URL}/view/{view_id}')
r.raise_for_status()
json_data = utils.parse_single(request_text=r.text, site=self.SITE)
return torrent.json_to_class(json_data)
def get_user(self, username): def get_user(self, username):
r = requests.get("{}/user/{}".format(self.URI, username)) r = requests.get(f'{self.URL}/user/{username}')
soup = BeautifulSoup(r.text, 'html.parser') r.raise_for_status()
return utils.parse_nyaa(soup.select('table tr'), limit=None) json_data = utils.parse_nyaa(
request_text=r.text,
limit=None,
site=self.SITE
)
return torrent.json_to_class(json_data)

View File

@@ -1,10 +1,24 @@
import requests import requests
from NyaaPy import utils from NyaaPy import utils
class Pantsu: class Pantsu:
def __init__(self): def __init__(self):
self.BASE_URL = "https://nyaa.pantsu.cat/api" self.BASE_URL = "https://nyaa.pantsu.cat/api"
self.SITE = utils.TorrentSite.NYAANET
def last_uploads(self, number_of_results):
r = requests.get(self.SITE.value)
r.raise_for_status()
with open("test.html", "w") as f:
f.write(r.text)
return utils.parse_nyaa(
request_text=r.text,
limit=number_of_results + 1,
site=self.SITE
)
# Torrents - GET # Torrents - GET
def search(self, keyword, **kwargs): def search(self, keyword, **kwargs):
@@ -15,10 +29,11 @@ class Pantsu:
def view(self, item_id): def view(self, item_id):
request = requests.get("{}/view/{}".format(self.BASE_URL, item_id)) request = requests.get("{}/view/{}".format(self.BASE_URL, item_id))
request.raise_for_status()
return request.json() return request.json()
# Torrents - POST # Torrents - POST
def upload(self): def upload(self):
return "Work in progress!" return "Work in progress!"
@@ -26,7 +41,6 @@ class Pantsu:
return "Work in progress!" return "Work in progress!"
# Users # Users
def login(self, username, password): def login(self, username, password):
login = requests.post("{}/login/".format( login = requests.post("{}/login/".format(
self.BASE_URL), data={'username': username, 'password': password}) self.BASE_URL), data={'username': username, 'password': password})

View File

@@ -1,9 +1,14 @@
import requests import requests
from bs4 import BeautifulSoup
from NyaaPy import utils from NyaaPy import utils
class SukebeiNyaa: class SukebeiNyaa:
def __init__(self):
self.SITE = utils.TorrentSite.SUKEBEINYAASI
def search(self, keyword, **kwargs): def search(self, keyword, **kwargs):
uri = self.SITE.value
category = kwargs.get('category', 0) category = kwargs.get('category', 0)
subcategory = kwargs.get('subcategory', 0) subcategory = kwargs.get('subcategory', 0)
filters = kwargs.get('filters', 0) filters = kwargs.get('filters', 0)
@@ -11,37 +16,37 @@ class SukebeiNyaa:
if page > 0: if page > 0:
r = requests.get("{}/?f={}&c={}_{}&q={}&p={}".format( r = requests.get("{}/?f={}&c={}_{}&q={}&p={}".format(
"http://sukebei.nyaa.si", filters, category, subcategory, uri, filters, category, subcategory,
keyword, page)) keyword, page))
else: else:
r = requests.get("{}/?f={}&c={}_{}&q={}".format( r = requests.get("{}/?f={}&c={}_{}&q={}".format(
"http://sukebei.nyaa.si", filters, category, subcategory, uri, filters, category, subcategory,
keyword)) keyword))
soup = BeautifulSoup(r.text, 'html.parser') r.raise_for_status()
rows = soup.select('table tr') return utils.parse_nyaa(r.text, limit=None, site=self.SITE)
return utils.parse_nyaa(rows, limit=None)
def get(self, id): def get(self, id):
r = requests.get("http://sukebei.nyaa.si/view/{}".format(id)) r = requests.get("{}/view/{}".format(self.SITE.value, id))
soup = BeautifulSoup(r.text, 'html.parser') r.raise_for_status()
content = soup.findAll("div", {"class": "panel", "id": None})
return utils.parse_single(content) return utils.parse_single(r.text, self.SITE)
def get_user(self, username): def get_user(self, username):
r = requests.get("http://sukebei.nyaa.si/user/{}".format(username)) r = requests.get("{}/user/{}".format(self.SITE.value, username))
soup = BeautifulSoup(r.text, 'html.parser') r.raise_for_status()
return utils.parse_nyaa(soup.select('table tr'), limit=None) return utils.parse_nyaa(r.text, limit=None, site=self.SITE)
def news(self, number_of_results): def last_uploads(self, number_of_results):
r = requests.get("http://sukebei.nyaa.si/") r = requests.get(self.SITE.value)
soup = BeautifulSoup(r.text, 'html.parser') r.raise_for_status()
rows = soup.select('table tr')
return utils.parse_sukebei(rows, limit=number_of_results + 1) return utils.parse_nyaa(
r.text,
limit=number_of_results + 1,
site=self.SITE
)
class SukebeiPantsu: class SukebeiPantsu:

17
NyaaPy/torrent.py Normal file
View File

@@ -0,0 +1,17 @@
def json_to_class(data):
    """Convert parsed torrent data into Torrent object(s).

    Args:
        data: Either a single dict of torrent attributes or a list of
            such dicts (as produced by the utils parsers).

    Returns:
        A single Torrent when *data* is a dict, otherwise a list of
        Torrent objects — one per dict, input order preserved.
    """
    if isinstance(data, list):
        # One Torrent per entry, keeping the original ordering.
        return [Torrent(item) for item in data]
    return Torrent(data)


# This deals with converting the dict to an object
class Torrent(object):
    """Lightweight attribute container built from a torrent dict.

    Every key of the source dict becomes an instance attribute, so
    parsed fields can be accessed as ``torrent.name`` instead of
    ``torrent['name']``.
    """

    def __init__(self, my_dict):
        for key in my_dict:
            setattr(self, key, my_dict[key])

    def __repr__(self):
        # Show all dynamically-set attributes for easy debugging.
        return "{}({!r})".format(type(self).__name__, vars(self))

View File

@@ -1,242 +1,256 @@
''' from enum import Enum
Module utils from lxml import etree
'''
import re class TorrentSite(Enum):
"""
def nyaa_categories(b): Contains torrent sites
c = b.replace('/?c=', '') """
cats = c.split('_') NYAASI = "https://nyaa.si"
SUKEBEINYAASI = "https://sukebei.nyaa.si"
cat = cats[0]
subcat = cats[1] # * nyaa.pantsu.cat redirects to nyaa.net
NYAANET = "https://nyaa.net"
categories = { SUKEBEINYAANET = "https://sukebei.nyaa.net"
"1": {
"name": "Anime",
"subcats": { def nyaa_categories(b):
"1": "Anime Music Video", c = b.replace('?c=', '')
"2": "English-translated", cats = c.split('_')
"3": "Non-English-translated",
"4": "Raw" cat = cats[0]
} sub_cat = cats[1]
},
"2": { categories = {
"name": "Audio", "1": {
"subcats": { "name": "Anime",
"1": "Lossless", "sub_cats": {
"2": "Lossy" "1": "Anime Music Video",
} "2": "English-translated",
}, "3": "Non-English-translated",
"3": { "4": "Raw"
"name": "Literature", }
"subcats": { },
"1": "English-translated", "2": {
"2": "Non-English-translated", "name": "Audio",
"3": "Raw" "sub_cats": {
} "1": "Lossless",
}, "2": "Lossy"
"4": { }
"name": "Live Action", },
"subcats": { "3": {
"1": "English-translated", "name": "Literature",
"2": "Idol/Promotional Video", "sub_cats": {
"3": "Non-English-translated", "1": "English-translated",
"4": "Raw" "2": "Non-English-translated",
} "3": "Raw"
}, }
"5": { },
"name": "Pictures", "4": {
"subcats": { "name": "Live Action",
"1": "Graphics", "sub_cats": {
"2": "Photos" "1": "English-translated",
} "2": "Idol/Promotional Video",
}, "3": "Non-English-translated",
"6": { "4": "Raw"
"name": "Software", }
"subcats": { },
"1": "Applications", "5": {
"2": "Games" "name": "Pictures",
} "sub_cats": {
} "1": "Graphics",
} "2": "Photos"
}
try: },
category_name = "{} - {}".format( "6": {
categories[cat]['name'], categories[cat]['subcats'][subcat]) "name": "Software",
except Exception: "sub_cats": {
pass "1": "Applications",
"2": "Games"
return category_name }
}
def parse_nyaa(table_rows, limit): }
if limit == 0:
limit = len(table_rows) try:
category_name = f"{categories[cat]['name']} - {categories[cat]['sub_cats'][sub_cat]}"
torrents = [] except KeyError:
print("Unable to get Nyaa category name")
for row in table_rows[:limit]: return
block = []
return category_name
for td in row.find_all('td'):
if td.find_all('a'):
for link in td.find_all('a'): def parse_nyaa(request_text, limit, site):
if link.get('href')[-9:] != '#comments': parser = etree.HTMLParser()
block.append(link.get('href')) tree = etree.fromstring(request_text, parser)
if link.text.rstrip():
block.append(link.text) # Put proper domain here.
uri = site.value
if td.text.rstrip():
block.append(td.text.rstrip()) torrents = []
try: # Going through table rows
torrent = { for tr in tree.xpath("//tbody//tr")[:limit]:
'id': block[1].replace("/view/", ""), block = []
'category': nyaa_categories(block[0]),
'url': "http://nyaa.si{}".format(block[1]), for td in tr.xpath("./td"):
'name': block[2], for link in td.xpath("./a"):
'download_url': "http://nyaa.si{}".format(block[4]),
'magnet': block[5], href = link.attrib.get("href").split('/')[-1]
'size': block[6],
'date': block[7], # Only caring about non-comment pages.
'seeders': block[8], if href[-9:] != "#comments":
'leechers': block[9], block.append(href)
'completed_downloads': block[10],
} if link.text and link.text.strip():
block.append(link.text.strip())
torrents.append(torrent)
except IndexError as ie: if td.text is not None and td.text.strip():
pass block.append(td.text.strip())
return torrents # Add type of torrent based on tr class.
if tr.attrib.get("class") is not None:
def parse_single(content): if 'danger' in tr.attrib.get("class"):
torrent = {} block.append("remake")
data = [] elif 'success' in tr.attrib.get("class"):
torrent_files = [] block.append("trusted")
else:
for row in content[0].find_all('div', {'class': 'row'}): block.append("default")
for div in row.find_all('div', {'class': 'col-md-5'}): else:
data.append(div.text.replace("\n", "")) block.append("default")
files = content[2].find('div', # Decide category.
{'class', 'torrent-file-list'}).find_all('li') if site in [TorrentSite.NYAASI, TorrentSite.NYAANET]:
category = nyaa_categories(block[0])
for file in files: elif site in [TorrentSite.SUKEBEINYAASI, TorrentSite.SUKEBEINYAANET]:
torrent_files.append(file.text) category = sukebei_categories(block[0])
else:
torrent['title'] = re.sub('\n|\r|\t', '', content[0].find('h3', { raise ValueError("Unknown TorrentSite received!")
"class": "panel-title"}).text.replace("\n", ""))
torrent['category'] = data[0] # Create torrent object
torrent['uploader'] = data[2] try:
torrent['uploader_profile'] = "https://nyaa.si/user/{}".format(data[2]) torrent = {
torrent['website'] = re.sub('\t', '', data[4]) 'id': block[1],
torrent['size'] = data[6] 'category': category,
torrent['date'] = data[1] 'url': "{}/view/{}".format(uri, block[1]),
torrent['seeders'] = data[3] 'name': block[2],
torrent['leechers'] = data[5] 'download_url': "{}/download/{}".format(uri, block[3]),
torrent['completed'] = data[7] 'magnet': block[4],
torrent['hash'] = data[8] 'size': block[5],
torrent['description'] = re.sub('\t', '', content[1].find('div', { 'date': block[6],
'id': 'torrent-description'}).text) 'seeders': block[7],
torrent['files'] = torrent_files 'leechers': block[8],
'completed_downloads': block[9],
return torrent 'type': block[10]
}
# def parse_sukebei(table_rows, limit): torrents.append(torrent)
# if limit == 0: except IndexError:
# limit = len(table_rows) pass
return torrents
# torrents = []
# for row in table_rows[:limit]: def parse_single(request_text, site):
# block = [] parser = etree.HTMLParser()
tree = etree.fromstring(request_text, parser)
# for td in row.find_all('td'):
# for link in td.find_all('a'): # Put proper domain here.
# if link.get('href')[-9:] != '#comments': uri = site.value
# block.append(link.get('href'))
# block.append(link.text.rstrip()) torrent = {}
data = []
# if td.text.rstrip(): torrent_files = []
# block.append(td.text.rstrip())
# Find basic uploader info & torrent stats
# try: for row in tree.xpath("//div[@class='row']"):
# torrent = { for div_text in row.xpath("./div[@class='col-md-5']//text()"):
# 'id': block[1].replace("/view/", ""), d = div_text.strip()
# 'category': sukebei_categories(block[0]), if d:
# 'url': "http://sukebei.nyaa.si{}".format(block[1]), data.append(d)
# 'name': block[2],
# 'download_url': "http://sukebei.nyaa.si{}".format( # Find files, we need only text of the li element(s).
# block[4]), # Sorry about Pycodestyle aka PEP8 (E501) error
# 'magnet': block[5], for el in tree.xpath("//div[contains(@class, 'torrent-file-list')]//li/text()"):
# 'size': block[6], if el.rstrip():
# 'date': block[7], torrent_files.append(el)
# 'seeders': block[8],
# 'leechers': block[9], torrent['title'] = \
# 'completed_downloads': block[10], tree.xpath("//h3[@class='panel-title']/text()")[0].strip()
# } torrent['category'] = data[0]
torrent['uploader'] = data[4]
# torrents.append(torrent) torrent['uploader_profile'] = "{}/user/{}".format(uri, data[4])
torrent['website'] = data[6]
# return torrents torrent['size'] = data[8]
torrent['date'] = data[3]
def sukebei_categories(b): torrent['seeders'] = data[5]
c = b.replace('/?c=', '') torrent['leechers'] = data[7]
cats = c.split('_') torrent['completed'] = data[9]
torrent['hash'] = data[10]
cat = cats[0] torrent['files'] = torrent_files
subcat = cats[1]
torrent['description'] = ""
categories = { for s in tree.xpath("//div[@id='torrent-description']"):
"1": { torrent['description'] += s.text
"name": "Art",
"subcats": { return torrent
"1": "Anime",
"2": "Doujinshi",
"3": "Games", def sukebei_categories(b):
"4": "Manga", c = b.replace('?c=', '')
"5": "Pictures", cats = c.split('_')
}
}, cat = cats[0]
"2": { subcat = cats[1]
"name": "Real Life",
"subcats": { categories = {
"1": "Photobooks & Pictures", "1": {
"2": "Videos" "name": "Art",
} "subcats": {
} "1": "Anime",
} "2": "Doujinshi",
"3": "Games",
try: "4": "Manga",
category_name = "{} - {}".format( "5": "Pictures",
categories[cat]['name'], categories[cat]['subcats'][subcat]) }
except Exception: },
pass "2": {
"name": "Real Life",
return category_name "subcats": {
"1": "Photobooks & Pictures",
# Pantsu Utils "2": "Videos"
def query_builder(q, params): }
available_params = ["category", "page", "limit", "userID", "fromID", }
"status", "maxage", "toDate", "fromDate", }
"dateType", "minSize", "maxSize", "sizeType",
"sort", "order", "lang"] try:
query = "?q={}".format(q.replace(" ", "+")) category_name = f"{categories[cat]['name']} - {categories[cat]['subcats'][subcat]}"
except KeyError:
for param, value in params.items(): print("Unable to get Sukebei category name")
if param in available_params: return
if (param != "category" and param != "status" and
param != "lang"): return category_name
query += "&{}={}".format(param, value)
elif param == "category":
query += "&c={}_{}".format(value[0], value[1]) # Pantsu Utils
def query_builder(q, params):
elif param == "status": available_params = ["category", "page", "limit", "userID", "fromID",
query += "&s={}".format(value) "status", "maxage", "toDate", "fromDate",
"dateType", "minSize", "maxSize", "sizeType",
elif param == "lang": "sort", "order", "lang"]
for lang in value: query = "?q={}".format(q.replace(" ", "+"))
query += "&lang={}".format(lang)
for param, value in params.items():
return query if param in available_params:
if (param != "category" and param != "status" and
param != "lang"):
query += "&{}={}".format(param, value)
elif param == "category":
query += "&c={}_{}".format(value[0], value[1])
elif param == "status":
query += "&s={}".format(value)
elif param == "lang":
for lang in value:
query += "&lang={}".format(lang)
return query

View File

@@ -1,2 +1,3 @@
requests>=2.20.0 requests>=2.20.0
beautifulsoup4==4.6.0 beautifulsoup4==4.6.0
lxml

View File

@@ -1,8 +1,79 @@
from NyaaPy import Pantsu, Nyaa from NyaaPy.nyaa import Nyaa
from pprint import pprint
pantsu = Pantsu() from datetime import datetime
nyaa = Nyaa() import json
import sys
# print(pantsu.search(keyword='koe no katachi', import os
# lang=["es", "ja"], category=[1, 3]))
print(nyaa.search(keyword='yuru camp')) # Creating a folder for test_files
# ! not included in github project.

# Create a folder for test output files.
if not os.path.isdir("test_files"):
    os.makedirs("test_files")

nyaa = Nyaa()


def _elapsed_ms(begin, end):
    """Return the elapsed time between two datetimes in milliseconds.

    Uses total_seconds() so runs longer than one second are reported
    correctly — ``timedelta.microseconds`` only holds the sub-second
    component and silently drops whole seconds.
    """
    return (end - begin).total_seconds() * 1000


def _write_names(path, torrents):
    """Write the name of each torrent to *path*, one per line."""
    with open(path, 'w') as f:
        for torrent in torrents:
            try:
                # Byte-like representation since unicode handling is fun.
                f.write(str(torrent.name.encode('utf-8')) + '\n')
            except AttributeError:
                # Newline added so consecutive failures stay on
                # separate lines.
                f.write('No name found for this torrent\n')


# Get fresh torrents
dt_latest_torrents_begin = datetime.now()
latest_torrents = nyaa.last_uploads(100)
dt_latest_torrents_end = datetime.now()

_write_names("test_files/nyaa_latest_torrent_test.json", latest_torrents)

# Run a search
dt_search_begin = datetime.now()
test_search = nyaa.search("kimi no na wa")
dt_search_end = datetime.now()

_write_names("test_files/nyaa_search_test.json", test_search)

# Get first torrent from the found torrents
dt_single_torrent_begin = datetime.now()
single_torrent = test_search[0]
dt_single_torrent_end = datetime.now()

# BUG FIX: the original wrote the stale loop variable ``torrent``
# here instead of ``single_torrent``.
_write_names("test_files/nyaa_single_torrent_test.json", [single_torrent])

dt_user_begin = datetime.now()
user_torrents = nyaa.get_user("HorribleSubs")
dt_user_end = datetime.now()

_write_names("test_files/nyaa_single_user_test.json", user_torrents)

print(
    "Latest torrents time:",
    _elapsed_ms(dt_latest_torrents_begin, dt_latest_torrents_end),
    "msec")
print(
    "Test search time:",
    _elapsed_ms(dt_search_begin, dt_search_end),
    "msec"
)
print(
    "Single torrent time:",
    _elapsed_ms(dt_single_torrent_begin, dt_single_torrent_end),
    "msec"
)
print(
    "Single user time:",
    _elapsed_ms(dt_user_begin, dt_user_end),
    "msec"
)

6
tests/test_pantsu.py Normal file
View File

@@ -0,0 +1,6 @@
"""
* Pantsu need some serious work
Regular data single_torrent parser not working from other Nyaa alternatives
Needs some work
"""
print("TODO")

58
tests/test_sukebei.py Normal file
View File

@@ -0,0 +1,58 @@
from NyaaPy.sukebei import SukebeiNyaa
from datetime import datetime
import json
import os
# Create a folder for test output files
# (not included in the GitHub project).
if not os.path.isdir("test_files"):
    os.makedirs("test_files")

nyaa = SukebeiNyaa()


def _elapsed_ms(begin, end):
    """Return the elapsed time between two datetimes in milliseconds.

    Uses total_seconds() so runs longer than one second are reported
    correctly — ``timedelta.microseconds`` only holds the sub-second
    component and silently drops whole seconds.
    """
    return (end - begin).total_seconds() * 1000


# Get fresh torrents
dt_latest_torrents_begin = datetime.now()
latest_torrents = nyaa.last_uploads(100)
dt_latest_torrents_end = datetime.now()

with open("test_files/sukebei_latest_torrent_test.json", 'w') as f:
    json.dump(latest_torrents, f)

# Run a search
dt_search_begin = datetime.now()
test_search = nyaa.search("G Senjou no maou")
dt_search_end = datetime.now()

with open("test_files/sukebei_search_test.json", 'w') as f:
    json.dump(test_search, f)

# Get first torrent from the found torrents
dt_single_torrent_begin = datetime.now()
single_torrent = nyaa.get(test_search[0]["id"])
dt_single_torrent_end = datetime.now()

with open("test_files/sukebei_single_torrent_test.json", 'w') as f:
    json.dump(single_torrent, f)

dt_user_begin = datetime.now()
user_torrents = nyaa.get_user("RUNBKK")
dt_user_end = datetime.now()

with open("test_files/sukebei_single_user_test.json", 'w') as f:
    json.dump(user_torrents, f)

print(
    "Latest torrents time:",
    _elapsed_ms(dt_latest_torrents_begin, dt_latest_torrents_end),
    "msec")
print(
    "Test search time:",
    _elapsed_ms(dt_search_begin, dt_search_end),
    "msec"
)
print(
    "Single torrent time:",
    _elapsed_ms(dt_single_torrent_begin, dt_single_torrent_end),
    "msec"
)
print(
    "Single user time:",
    _elapsed_ms(dt_user_begin, dt_user_end),
    "msec"
)