Create torrent_galaxy.py
torrents/torrent_galaxy.py +227 -0
torrents/torrent_galaxy.py
ADDED
@@ -0,0 +1,227 @@
import re
import time

import aiohttp
from bs4 import BeautifulSoup

from helper.html_scraper import Scraper
from constants.base_url import TGX


class TorrentGalaxy:
    def __init__(self):
        self.BASE_URL = TGX
        self.LIMIT = None

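    # Parses a single torrent detail page: poster, torrent/magnet/direct
    # links, the metadata rows (.tprow), seed/leech/download counters,
    # IMDB id, genres and screenshots. Returns None if parsing fails.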
    def _parser_individual(self, html):
        try:
            soup = BeautifulSoup(html[0], "html.parser")
            my_dict = {"data": []}
            root_div = soup.find("div", class_="gluewrapper")
            post_nd_torrents = root_div.find_next("div").find_all("div")
            poster = post_nd_torrents[1].find("img")["data-src"]
            torrents_and_all = post_nd_torrents[4].find_all("a")
            torrent_link = torrents_and_all[0]["href"]
            magnet_link = torrents_and_all[1]["href"]
            direct_link = self.BASE_URL + torrents_and_all[2]["href"]

            details_root = soup.find("div", class_="gluewrapper").select(
                "div > :nth-child(2) > div > .tprow"
            )

            name = details_root[0].find_all("div")[-1].get_text(strip=True)
            category = (
                details_root[3].find_all("div")[-1].get_text(strip=True).split(">")[0]
            )
            language = details_root[4].find_all("div")[-1].get_text(strip=True)
            size = details_root[5].find_all("div")[-1].get_text(strip=True)
            # Renamed from "hash" to avoid shadowing the builtin.
            torrent_hash = details_root[6].find_all("div")[-1].get_text(strip=True)
            username = (
                details_root[7]
                .find_all("div")[-1]
                .find("span", class_="username")
                .get_text(strip=True)
            )
            date_up = details_root[8].find_all("div")[-1].get_text(strip=True)

            btns = details_root[10].find_all("button")
            seeders = btns[0].find("span").get_text(strip=True)
            leechers = btns[1].find("span").get_text(strip=True)
            downloads = btns[2].find("span").get_text(strip=True)
            imdb_id = soup.select_one("#imdbpage")["href"].split("/")[-1]
            genre_list = [
                x.get_text(strip=True) for x in details_root[11].find_all("a")
            ]
            imgs = [
                img["href"]
                for img in soup.find("div", id="intblockslide").find_all("a")
                if img["href"].endswith((".png", ".jpg", ".jpeg"))
            ]
            my_dict["data"].append(
                {
                    "name": name,
                    "size": size,
                    "seeders": seeders,
                    "language": language,
                    "leechers": leechers,
                    "category": category,
                    "uploader": username,
                    "downloads": downloads,
                    "poster": poster,
                    "direct_download_link": direct_link,
                    "imdb_id": imdb_id,
                    "hash": torrent_hash,
                    "magnet": magnet_link,
                    "torrent": torrent_link,
                    "screenshot": imgs,
                    "genre": genre_list,
                    "date": date_up,
                }
            )
            return my_dict
        except Exception:
            return None

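    # Parses search/listing pages: one entry per row (div.tgxtablerow). The
    # paired index lookups (div[4] vs div[1], div[5] vs div[3], ...) appear
    # to cover two alternate row layouts that TGX serves.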
    def _parser(self, htmls):
        try:
            for html in htmls:
                soup = BeautifulSoup(html, "html.parser")

                my_dict = {"data": []}
                for idx, divs in enumerate(soup.find_all("div", class_="tgxtablerow")):
                    div = divs.find_all("div")
                    try:
                        name = div[4].find("a").get_text(strip=True)
                        imdb_url = div[4].find_all("a")[-1]["href"]
                    except Exception:
                        name = div[1].find("a", class_="txlight").find("b").text
                        imdb_url = div[1].find_all("a")[-1]["href"]

                    if name:
                        try:
                            magnet = div[5].find_all("a")[1]["href"]
                            torrent = div[5].find_all("a")[0]["href"]
                        except Exception:
                            magnet = div[3].find_all("a")[1]["href"]
                            torrent = div[3].find_all("a")[0]["href"]
                        size = soup.select("span.badge.badge-secondary.txlight")[
                            idx
                        ].text
                        try:
                            url = div[4].find("a")["href"]
                        except Exception:
                            url = div[1].find("a", class_="txlight")["href"]
                        try:
                            date = div[12].get_text(strip=True)
                        except Exception:
                            date = div[10].get_text(strip=True)
                        # Both branches of the original try/except here were
                        # identical, so the redundant fallback was collapsed
                        # into a single lookup.
                        seeders_leechers = div[11].find_all("b")
                        seeders = seeders_leechers[0].text
                        leechers = seeders_leechers[1].text
                        try:
                            uploader = div[7].find("a").find("span").text
                        except Exception:
                            uploader = div[5].find("a").find("span").text
                        try:
                            category = (
                                div[0].find("small").text.replace(" ", "")
                            ).split(":")[0]
                        except Exception:
                            category = None
                        my_dict["data"].append(
                            {
                                "name": name,
                                "size": size,
                                "seeders": seeders,
                                "leechers": leechers,
                                "category": category,
                                "uploader": uploader,
                                "imdb_id": imdb_url.split("=")[-1],
                                # The original character class included literal
                                # braces and a comma; the intent is a 32-40
                                # char hex infohash from the magnet link.
                                "hash": re.search(
                                    r"([a-fA-F\d]{32,40})\b", magnet
                                ).group(0),
                                "magnet": magnet,
                                "torrent": torrent,
                                "url": self.BASE_URL + url,
                                "date": date,
                            }
                        )
                    if len(my_dict["data"]) == self.LIMIT:
                        break
                try:
                    ul = soup.find_all("ul", class_="pagination")[-1]
                    tpages = ul.find_all("li")[-2]
                    my_dict["current_page"] = int(
                        soup.select_one("li.page-item.active.txlight a").text.split(
                            " "
                        )[0]
                    )
                    my_dict["total_pages"] = int(tpages.find("a").text)
                except Exception:
                    my_dict["current_page"] = None
                    my_dict["total_pages"] = None
                # ...
                return my_dict
        except Exception:
            return None

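    # Full-text search, sorted by seeders. Callers pass a 1-indexed page;
    # the page - 1 below converts it to TGX's 0-indexed paging.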
    async def search(self, query, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = (
                self.BASE_URL
                + "/torrents.php?search=+{}&sort=seeders&order=desc&page={}".format(
                    query, page - 1
                )
            )
            return await self.parser_result(start_time, url, session)

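    # Scrapes a single torrent detail page from its full URL.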
    async def get_torrent_by_url(self, torrent_url):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            return await self.parser_result(
                start_time, torrent_url, session, is_individual=True
            )

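    # Fetches the page via the shared Scraper helper, dispatches to the
    # matching parser, and attaches elapsed time and result count on success.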
    async def parser_result(self, start_time, url, session, is_individual=False):
        html = await Scraper().get_all_results(session, url)
        if is_individual:
            results = self._parser_individual(html)
        else:
            results = self._parser(html)
        if results is not None:
            results["time"] = time.time() - start_time
            results["total"] = len(results["data"])
        return results

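    # The TGX homepage is used as the trending listing, so category and page
    # are currently unused here.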
    async def trending(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = self.BASE_URL
            return await self.parser_result(start_time, url, session)

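    # Recent uploads, optionally filtered by parent category; TGX labels the
    # documentaries category "Docus", hence the special case below.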
    async def recent(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            if not category:
                url = self.BASE_URL + "/latest"
            else:
                if category == "documentaries":
                    category = "Docus"
                url = (
                    self.BASE_URL
                    + "/torrents.php?parent_cat={}&sort=id&order=desc&page={}".format(
                        str(category).capitalize(), page - 1
                    )
                )
            return await self.parser_result(start_time, url, session)

    #! May be implemented in the future
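
For reference, a minimal sketch of how the class might be exercised end to end. This is illustrative only: it assumes the file is importable as torrents.torrent_galaxy and that helper.html_scraper.Scraper and constants.base_url.TGX resolve as in the rest of this repo; the query, page and limit values are arbitrary.

import asyncio

from torrents.torrent_galaxy import TorrentGalaxy


async def main():
    tgx = TorrentGalaxy()
    # "ubuntu", page 1 and limit 10 are arbitrary example values.
    results = await tgx.search(query="ubuntu", page=1, limit=10)
    if results:
        for torrent in results["data"]:
            print(torrent["name"], torrent["seeders"], torrent["magnet"][:60])


asyncio.run(main())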