Update torrents/x1337.py

torrents/x1337.py · +70 -82 · CHANGED
@@ -1,67 +1,59 @@
 import asyncio
 import re
 import time
-import aiohttp
+import cloudscraper
 from bs4 import BeautifulSoup
 from helper.asyncioPoliciesFix import decorator_asyncio_fix
-from helper.html_scraper import CloudScraper
 from constants.base_url import X1337
 from constants.headers import HEADER_AIO
 
-
 class x1337:
     def __init__(self):
         self.BASE_URL = X1337
         self.LIMIT = None
+        self.scraper = cloudscraper.create_scraper()
 
     @decorator_asyncio_fix
-    async def _individual_scrap(self, session, url, obj):
+    async def _individual_scrap(self, url, obj):
         try:
-            …
+            html = await asyncio.to_thread(self.scraper.get, url, headers=HEADER_AIO)
+            html = html.text
+            soup = BeautifulSoup(html, "html.parser")
             try:
-                …
+                magnet = soup.select_one(".no-top-radius > div > ul > li > a")["href"]
+                uls = soup.find_all("ul", class_="list")[1]
+                lis = uls.find_all("li")[0]
+                imgs = [
+                    img["data-original"]
+                    for img in (soup.find("div", id="description")).find_all("img")
+                    if img["data-original"].endswith((".png", ".jpg", ".jpeg"))
+                ]
+                files = [f.text for f in soup.find("div", id="files").find_all("li")]
                 if len(imgs) > 0:
                     obj["screenshot"] = imgs
                 obj["category"] = lis.find("span").text
                 obj["files"] = files
                 try:
                     poster = soup.select_one("div.torrent-image img")["src"]
                     if str(poster).startswith("//"):
                         obj["poster"] = "https:" + poster
                     elif str(poster).startswith("/"):
                         obj["poster"] = self.BASE_URL + poster
                 except:
-                    ...
+                    pass
                 obj["magnet"] = magnet
-
-                obj["hash"] = re.search(
-                    r"([{a-f\d,A-F\d}]{32,40})\b", magnet
-                ).group(0)
+                obj["hash"] = re.search(r"([{a-f\d,A-F\d}]{32,40})\b", magnet).group(0)
             except IndexError:
-                ...
+                pass
         except:
             return None
 
-    async def _get_torrent(self, result, session, urls):
+    async def _get_torrent(self, result, urls):
         tasks = []
         for idx, url in enumerate(urls):
            for obj in result["data"]:
                 if obj["url"] == url:
-                    task = asyncio.create_task(
-                        self._individual_scrap(session, url, result["data"][idx])
-                    )
+                    task = asyncio.create_task(self._individual_scrap(url, result["data"][idx]))
                     tasks.append(task)
         await asyncio.gather(*tasks)
         return result
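The net effect of this hunk: the shared aiohttp-style session parameter disappears, and each torrent page is now fetched with a blocking cloudscraper client handed off to a worker thread via asyncio.to_thread, so Cloudflare-protected pages load without stalling the event loop. A minimal sketch of that pattern (Python 3.9+ for asyncio.to_thread; the fetch helper and example URL are illustrative, not part of this file):

import asyncio
import cloudscraper

scraper = cloudscraper.create_scraper()  # requests-style client that handles Cloudflare JS challenges

async def fetch(url: str) -> str:
    # cloudscraper is synchronous, so run the blocking .get() in a
    # worker thread instead of blocking the running event loop
    response = await asyncio.to_thread(scraper.get, url)
    return response.text

async def main() -> None:
    html = await fetch("https://example.com/")
    print(len(html))

if __name__ == "__main__":
    asyncio.run(main())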
@@ -107,47 +99,46 @@ class x1337:
                 else:
                     my_dict["total_pages"] = int(pages[-1].text)
             except:
-                …
+                pass
             return my_dict, list_of_urls
         except:
             return None, None
 
     async def search(self, query, page, limit):
-        …
-        return await self.parser_result(
-            start_time, url, session, query=query, page=page
-        )
+        self.LIMIT = limit
+        start_time = time.time()
+        url = self.BASE_URL + "/search/{}/{}/".format(query, page)
+        return await self.parser_result(start_time, url, query=query, page=page)
 
-    async def parser_result(self, start_time, url, session, page, query=None):
-        …
+    async def parser_result(self, start_time, url, page, query=None):
+        html = await asyncio.to_thread(self.scraper.get, url, headers=HEADER_AIO)
+        htmls = [html.text]
         result, urls = self._parser(htmls)
         if result is not None:
-            results = await self._get_torrent(result, session, urls)
+            results = await self._get_torrent(result, urls)
             results["time"] = time.time() - start_time
             results["total"] = len(results["data"])
             if query is None:
                 return results
             while True:
                 if len(results["data"]) >= self.LIMIT:
-                    results["data"] = results["data"][…
+                    results["data"] = results["data"][: self.LIMIT]
                     results["total"] = len(results["data"])
                     return results
-                page …
+                page += 1
                 url = self.BASE_URL + "/search/{}/{}/".format(query, page)
-                …
+                html = await asyncio.to_thread(self.scraper.get, url, headers=HEADER_AIO)
+                htmls = [html.text]
                 result, urls = self._parser(htmls)
                 if result is not None:
                     if len(result["data"]) > 0:
-                        res = await self._get_torrent(result, session, urls)
+                        res = await self._get_torrent(result, urls)
                         for obj in res["data"]:
                             results["data"].append(obj)
                         try:
                             results["current_page"] = res["current_page"]
                         except:
-                            …
+                            pass
                         results["time"] = time.time() - start_time
                         results["total"] = len(results["data"])
                     else:
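search and parser_result move to the same asyncio.to_thread fetch, while the pagination contract stays as before: append each page's rows, advance page, and stop once len(results["data"]) >= self.LIMIT, trimming the overshoot. A stripped-down sketch of that loop shape, with a hypothetical fetch_page coroutine standing in for this file's fetch-and-parse:

import asyncio

async def paginate(fetch_page, limit: int) -> list:
    # accumulate page batches until the caller-supplied limit is met
    results: list = []
    page = 1
    while len(results) < limit:
        batch = await fetch_page(page)
        if not batch:  # empty page: no more results, stop early
            break
        results.extend(batch)
        page += 1
    return results[:limit]  # trim the overshoot from the last page

async def fake_page(page: int) -> list:
    # stand-in scraper: three rows per page, five pages total
    return [f"row-{page}-{i}" for i in range(3)] if page <= 5 else []

print(asyncio.run(paginate(fake_page, 10)))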
@@ -158,32 +149,29 @@ class x1337:
         return result
 
     async def trending(self, category, page, limit):
-        …
-        return await self.parser_result(start_time, url, session, page)
+        start_time = time.time()
+        self.LIMIT = limit
+        if not category:
+            url = self.BASE_URL + "/home/"
+        else:
+            url = self.BASE_URL + "/popular-{}".format(category.lower())
+        return await self.parser_result(start_time, url, page)
 
     async def recent(self, category, page, limit):
-        …
-        return await self.parser_result(start_time, url, session, page)
+        start_time = time.time()
+        self.LIMIT = limit
+        if not category:
+            url = self.BASE_URL + "/trending"
+        else:
+            url = self.BASE_URL + "/cat/{}/{}/".format(
+                str(category).capitalize(), page
+            )
+        return await self.parser_result(start_time, url, page)
 
     async def search_by_category(self, query, category, page, limit):
-        …
-        return await self.parser_result(start_time, url, session, page, query)
+        start_time = time.time()
+        self.LIMIT = limit
+        url = self.BASE_URL + "/category-search/{}/{}/{}/".format(
+            query, category.capitalize(), page
+        )
+        return await self.parser_result(start_time, url, page, query)
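With session gone from every public method, callers now pass only query/category/page/limit. A hypothetical driver, assuming the Space's package layout so that torrents.x1337 and its imports (constants.base_url, helper.asyncioPoliciesFix) resolve:

import asyncio
from torrents.x1337 import x1337

async def main() -> None:
    site = x1337()
    # search() keeps fetching pages until `limit` rows are collected
    results = await site.search("ubuntu", page=1, limit=20)
    print(results["total"], "results in", round(results["time"], 2), "s")

asyncio.run(main())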
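A detail worth noting if _individual_scrap is reused elsewhere: the hash pattern ([{a-f\d,A-F\d}]{32,40})\b is a character class whose {, } and , are literal members alongside the hex ranges, and it extracts the infohash only because those extra characters never appear in the btih portion of a magnet URI. A tighter equivalent for a 40-character (hex SHA-1) or 32-character infohash, on a made-up magnet link:

import re

magnet = "magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567&dn=demo"

# match exactly 40 (or 32) hex characters between word boundaries
info_hash = re.search(r"\b([0-9a-fA-F]{40}|[0-9a-fA-F]{32})\b", magnet).group(0)
print(info_hash)  # 0123456789abcdef0123456789abcdef01234567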