Nyaa.si & sukebei.nyaa.si LXML fully ready

This commit is contained in:
Ferenc Nánási
2020-02-01 15:11:35 +01:00
parent bf01a922f0
commit 5c93e516ba
6 changed files with 126 additions and 87 deletions

View File

@@ -1,12 +1,11 @@
import requests
import urllib.parse
from NyaaPy import utils
class Nyaa:
def __init__(self):
self.URI = "http://nyaa.si"
self.URI = "https://nyaa.si"
def last_uploads(self, number_of_results):
r = requests.get(self.URI)

View File

@@ -1,8 +1,12 @@
import requests
from bs4 import BeautifulSoup
from NyaaPy import utils
class SukebeiNyaa:
def __init__(self):
self.URI = "https://sukebei.nyaa.si"
def search(self, keyword, **kwargs):
category = kwargs.get('category', 0)
subcategory = kwargs.get('subcategory', 0)
@@ -11,37 +15,37 @@ class SukebeiNyaa:
if page > 0:
r = requests.get("{}/?f={}&c={}_{}&q={}&p={}".format(
"http://sukebei.nyaa.si", filters, category, subcategory,
self.URI, filters, category, subcategory,
keyword, page))
else:
r = requests.get("{}/?f={}&c={}_{}&q={}".format(
"http://sukebei.nyaa.si", filters, category, subcategory,
self.URI, filters, category, subcategory,
keyword))
soup = BeautifulSoup(r.text, 'html.parser')
rows = soup.select('table tr')
return utils.parse_nyaa(rows, limit=None)
r.raise_for_status()
return utils.parse_nyaa(r.text, limit=None, sukebei=True)
def get(self, id):
r = requests.get("http://sukebei.nyaa.si/view/{}".format(id))
soup = BeautifulSoup(r.text, 'html.parser')
content = soup.findAll("div", {"class": "panel", "id": None})
r = requests.get("{}/view/{}".format(self.URI, id))
r.raise_for_status()
return utils.parse_single(content)
return utils.parse_single(r.text, sukebei=True)
def get_user(self, username):
r = requests.get("http://sukebei.nyaa.si/user/{}".format(username))
soup = BeautifulSoup(r.text, 'html.parser')
r = requests.get("{}/user/{}".format(self.URI, username))
r.raise_for_status()
return utils.parse_nyaa(soup.select('table tr'), limit=None)
return utils.parse_nyaa(r.text, limit=None, sukebei=True)
def news(self, number_of_results):
r = requests.get("http://sukebei.nyaa.si/")
soup = BeautifulSoup(r.text, 'html.parser')
rows = soup.select('table tr')
def last_uploads(self, number_of_results):
r = requests.get(self.URI)
r.raise_for_status()
return utils.parse_sukebei(rows, limit=number_of_results + 1)
return utils.parse_nyaa(
r.text,
limit=number_of_results + 1,
sukebei=True
)
class SukebeiPantsu:

View File

@@ -72,10 +72,15 @@ def nyaa_categories(b):
return category_name
def parse_nyaa(request_text, limit):
def parse_nyaa(request_text, limit, sukebei=False):
parser = etree.HTMLParser()
tree = etree.fromstring(request_text, parser)
if sukebei is False:
uri = "https://nyaa.si"
else:
uri = "https://sukebei.nyaa.si"
torrents = []
# Going through table rows
@@ -109,10 +114,10 @@ def parse_nyaa(request_text, limit):
try:
torrent = {
'id': block[1],
'category': nyaa_categories(block[0]),
'url': "https://nyaa.si/view/{}".format(block[1]),
'category': nyaa_categories(block[0]) if sukebei is False else sukebei_categories(block[0]),
'url': "{}/view/{}".format(uri, block[1]),
'name': block[2],
'download_url': "https://nyaa.si/download/{}".format(block[3]),
'download_url': "{}/download/{}".format(uri, block[3]),
'magnet': block[4],
'size': block[5],
'date': block[6],
@@ -127,10 +132,15 @@ def parse_nyaa(request_text, limit):
return torrents
def parse_single(request_text):
def parse_single(request_text, sukebei=False):
parser = etree.HTMLParser()
tree = etree.fromstring(request_text, parser)
if sukebei is False:
uri = "https://nyaa.si"
else:
uri = "https://sukebei.nyaa.si"
torrent = {}
data = []
torrent_files = []
@@ -152,7 +162,7 @@ def parse_single(request_text):
tree.xpath("//h3[@class='panel-title']/text()")[0].strip()
torrent['category'] = data[0]
torrent['uploader'] = data[4]
torrent['uploader_profile'] = "http://nyaa.si/user/{}".format(data[4])
torrent['uploader_profile'] = "{}/user/{}".format(uri, data[4])
torrent['website'] = data[6]
torrent['size'] = data[8]
torrent['date'] = data[3]
@@ -169,49 +179,8 @@ def parse_single(request_text):
return torrent
def parse_sukebei(table_rows, limit):
    """Parse sukebei.nyaa.si result-table rows into a list of torrent dicts.

    Args:
        table_rows: BeautifulSoup ``<tr>`` elements from the results table.
        limit: Maximum number of rows to parse; ``0`` means "all rows".

    Returns:
        A list of dicts with keys ``id``, ``category``, ``url``, ``name``,
        ``download_url``, ``magnet``, ``size``, ``date``, ``seeders``,
        ``leechers`` and ``completed_downloads``. Rows whose cells do not
        yield enough fields are skipped.
    """
    if limit == 0:
        limit = len(table_rows)

    torrents = []

    for row in table_rows[:limit]:
        # Flatten the row's cells into a positional list: each non-comment
        # link contributes its href and text, each non-empty cell its text.
        block = []

        for td in row.find_all('td'):
            for link in td.find_all('a'):
                # Skip the "N comments" anchor; it is not a data column.
                if link.get('href')[-9:] != '#comments':
                    block.append(link.get('href'))
                    block.append(link.text.rstrip())

            if td.text.rstrip():
                block.append(td.text.rstrip())

        try:
            torrent = {
                'id': block[1].replace("/view/", ""),
                'category': sukebei_categories(block[0]),
                'url': "http://sukebei.nyaa.si{}".format(block[1]),
                'name': block[2],
                'download_url': "http://sukebei.nyaa.si{}".format(
                    block[4]),
                'magnet': block[5],
                'size': block[6],
                'date': block[7],
                'seeders': block[8],
                'leechers': block[9],
                'completed_downloads': block[10],
            }
        except IndexError:
            # Fix: the original swallowed the error and then appended
            # anyway — a NameError on the first malformed row, or a stale
            # duplicate of the previous row afterwards. Skip the row.
            continue

        torrents.append(torrent)

    return torrents
def sukebei_categories(b):
c = b.replace('/?c=', '')
c = b.replace('?c=', '')
cats = c.split('_')
cat = cats[0]