randydev committed
Commit 46ff3d8 · verified · 1 Parent(s): b9a9892

Create x1337.py

Files changed (1)
torrents/x1337.py +191 -0
torrents/x1337.py ADDED
@@ -0,0 +1,191 @@
+ import asyncio
+ import re
+ import time
+ import aiohttp
+ from bs4 import BeautifulSoup
+ from helper.asyncioPoliciesFix import decorator_asyncio_fix
+ from helper.html_scraper import Scraper
+ from constants.base_url import X1337
+ from constants.headers import HEADER_AIO
+
+
+ class x1337:
+     def __init__(self):
+         self.BASE_URL = X1337
+         self.LIMIT = None
+
+     @decorator_asyncio_fix
+     async def _individual_scrap(self, session, url, obj):
+         # Fetch a single torrent's detail page and enrich `obj` in place with
+         # magnet link, category, file list, screenshots, poster and info hash.
+         try:
+             async with session.get(url, headers=HEADER_AIO) as res:
+                 html = await res.text(encoding="ISO-8859-1")
+                 soup = BeautifulSoup(html, "html.parser")
+                 try:
+                     magnet = soup.select_one(".no-top-radius > div > ul > li > a")[
+                         "href"
+                     ]
+                     uls = soup.find_all("ul", class_="list")[1]
+                     lis = uls.find_all("li")[0]
+                     imgs = [
+                         img["data-original"]
+                         for img in soup.find("div", id="description").find_all("img")
+                         if img["data-original"].endswith((".png", ".jpg", ".jpeg"))
+                     ]
+                     files = [
+                         f.text for f in soup.find("div", id="files").find_all("li")
+                     ]
+                     if len(imgs) > 0:
+                         obj["screenshot"] = imgs
+                     obj["category"] = lis.find("span").text
+                     obj["files"] = files
+                     try:
+                         poster = soup.select_one("div.torrent-image img")["src"]
+                         # Normalise protocol-relative and site-relative poster URLs.
+                         if str(poster).startswith("//"):
+                             obj["poster"] = "https:" + poster
+                         elif str(poster).startswith("/"):
+                             obj["poster"] = self.BASE_URL + poster
+                     except Exception:
+                         ...
+                     obj["magnet"] = magnet
+                     # The info hash is the 32-40 character hex string in the magnet URI.
+                     obj["hash"] = re.search(
+                         r"([A-Fa-f0-9]{32,40})\b", magnet
+                     ).group(0)
+                 except IndexError:
+                     ...
+         except Exception:
+             return None
+
+     async def _get_torrent(self, result, session, urls):
+         # Scrape every detail page concurrently; each task fills in the
+         # matching entry of result["data"].
+         tasks = []
+         for idx, url in enumerate(urls):
+             for obj in result["data"]:
+                 if obj["url"] == url:
+                     task = asyncio.create_task(
+                         self._individual_scrap(session, url, result["data"][idx])
+                     )
+                     tasks.append(task)
+         await asyncio.gather(*tasks)
+         return result
+
+     def _parser(self, htmls):
+         # Parse the search-results table into a dict and collect the detail-page
+         # URLs that _get_torrent will scrape afterwards.
+         try:
+             for html in htmls:
+                 soup = BeautifulSoup(html, "html.parser")
+                 list_of_urls = []
+                 my_dict = {"data": []}
+                 trs = soup.select("tbody tr")
+                 for tr in trs:
+                     td = tr.find_all("td")
+                     name = td[0].find_all("a")[-1].text
+                     if name:
+                         url = self.BASE_URL + td[0].find_all("a")[-1]["href"]
+                         list_of_urls.append(url)
+                         seeders = td[1].text
+                         leechers = td[2].text
+                         date = td[3].text
+                         # The size cell also contains the seeder count; strip it out.
+                         size = td[4].text.replace(seeders, "")
+                         uploader = td[5].find("a").text
+
+                         my_dict["data"].append(
+                             {
+                                 "name": name,
+                                 "size": size,
+                                 "date": date,
+                                 "seeders": seeders,
+                                 "leechers": leechers,
+                                 "url": url,
+                                 "uploader": uploader,
+                             }
+                         )
+                     if len(my_dict["data"]) == self.LIMIT:
+                         break
+                 try:
+                     # Pagination: the last link is ">>" when more pages follow.
+                     pages = soup.select(".pagination li a")
+                     my_dict["current_page"] = int(pages[0].text)
+                     tpages = pages[-1].text
+                     if tpages == ">>":
+                         my_dict["total_pages"] = int(pages[-2].text)
+                     else:
+                         my_dict["total_pages"] = int(pages[-1].text)
+                 except Exception:
+                     ...
+                 return my_dict, list_of_urls
+         except Exception:
+             return None, None
+
+     async def search(self, query, page, limit):
+         async with aiohttp.ClientSession() as session:
+             self.LIMIT = limit
+             start_time = time.time()
+             url = self.BASE_URL + "/search/{}/{}/".format(query, page)
+             return await self.parser_result(
+                 start_time, url, session, query=query, page=page
+             )
+
+     async def parser_result(self, start_time, url, session, page, query=None):
+         htmls = await Scraper().get_all_results(session, url)
+         result, urls = self._parser(htmls)
+         if result is not None:
+             results = await self._get_torrent(result, session, urls)
+             results["time"] = time.time() - start_time
+             results["total"] = len(results["data"])
+             if query is None:
+                 return results
+             # For keyword searches, keep fetching the next page until LIMIT
+             # results have been collected or no further results come back.
+             while True:
+                 if len(results["data"]) >= self.LIMIT:
+                     results["data"] = results["data"][0 : self.LIMIT]
+                     results["total"] = len(results["data"])
+                     return results
+                 page = page + 1
+                 url = self.BASE_URL + "/search/{}/{}/".format(query, page)
+                 htmls = await Scraper().get_all_results(session, url)
+                 result, urls = self._parser(htmls)
+                 if result is not None:
+                     if len(result["data"]) > 0:
+                         res = await self._get_torrent(result, session, urls)
+                         for obj in res["data"]:
+                             results["data"].append(obj)
+                         try:
+                             results["current_page"] = res["current_page"]
+                         except Exception:
+                             ...
+                         results["time"] = time.time() - start_time
+                         results["total"] = len(results["data"])
+                     else:
+                         break
+                 else:
+                     break
+             return results
+         return result
+
+     async def trending(self, category, page, limit):
+         async with aiohttp.ClientSession() as session:
+             start_time = time.time()
+             self.LIMIT = limit
+             if not category:
+                 url = self.BASE_URL + "/home/"
+             else:
+                 url = self.BASE_URL + "/popular-{}".format(category.lower())
+             return await self.parser_result(start_time, url, session, page)
+
+     async def recent(self, category, page, limit):
+         async with aiohttp.ClientSession() as session:
+             start_time = time.time()
+             self.LIMIT = limit
+             if not category:
+                 url = self.BASE_URL + "/trending"
+             else:
+                 url = self.BASE_URL + "/cat/{}/{}/".format(
+                     str(category).capitalize(), page
+                 )
+             return await self.parser_result(start_time, url, session, page)
+
+     async def search_by_category(self, query, category, page, limit):
+         async with aiohttp.ClientSession() as session:
+             start_time = time.time()
+             self.LIMIT = limit
+             url = self.BASE_URL + "/category-search/{}/{}/{}/".format(
+                 query, category.capitalize(), page
+             )
+             return await self.parser_result(start_time, url, session, page, query)
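A minimal usage sketch for the new scraper, assuming the repository's `helper` and `constants` packages are importable and that `Scraper.get_all_results` behaves as used above; the query, page and limit values are arbitrary examples.

    import asyncio
    from torrents.x1337 import x1337

    async def main():
        client = x1337()
        # Fetch up to 10 results for an example query from page 1.
        results = await client.search("ubuntu", page=1, limit=10)
        if results:
            print(results["total"], "results in", round(results["time"], 2), "s")
            for item in results["data"][:3]:
                print(item["name"], item["size"], item["seeders"])

    asyncio.run(main())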