randydev committed on
Commit 942b6ab · verified · 1 Parent(s): f8849d9

Update torrents/glodls.py

Files changed (1)
  1. torrents/glodls.py +90 -0
torrents/glodls.py CHANGED
@@ -0,0 +1,90 @@
+ import time
+ from urllib.parse import quote
+
+ import aiohttp
+ from bs4 import BeautifulSoup
+
+ from constants.base_url import GLODLS
+ from helper.html_scraper import Scraper
+
+
+ class Glodls:
+     def __init__(self):
+         self.BASE_URL = GLODLS
+         self.LIMIT = None
+
+     def _parser(self, htmls):
+         """Extract torrent rows from one or more GloDLS result pages."""
+         try:
+             # Accumulate rows across every page the scraper returned.
+             my_dict = {"data": []}
+             for html in htmls:
+                 soup = BeautifulSoup(html, "html.parser")
+                 # Step through every second "t-row", skipping the final row.
+                 for tr in soup.find_all("tr", class_="t-row")[0:-1:2]:
+                     td = tr.find_all("td")
+                     name = td[1].find_all("a")[-1].find("b").text
+                     url = self.BASE_URL + td[1].find_all("a")[-1]["href"]
+                     torrent = self.BASE_URL + td[2].find("a")["href"]
+                     magnet = td[3].find("a")["href"]
+                     size = td[4].text
+                     seeders = td[5].find("font").find("b").text
+                     leechers = td[6].find("font").find("b").text
+                     try:
+                         uploader = td[7].find("a").find("b").find("font").text
+                     except AttributeError:
+                         uploader = ""
+                     my_dict["data"].append(
+                         {
+                             "name": name,
+                             "size": size,
+                             "uploader": uploader,
+                             "seeders": seeders,
+                             "leechers": leechers,
+                             "magnet": magnet,
+                             "torrent": torrent,
+                             # "url" is already absolute; prepending
+                             # BASE_URL again would double the host.
+                             "url": url,
+                         }
+                     )
+                     if len(my_dict["data"]) == self.LIMIT:
+                         break
+                 # Pagination is optional; leave "total_pages" unset if the
+                 # block is missing or its href carries no page number.
+                 try:
+                     pagination = soup.find("div", class_="pagination")
+                     total_pages = pagination.find_all("a")[-2]["href"]
+                     total_pages = total_pages.split("=")[-1]
+                     my_dict["total_pages"] = int(total_pages) + 1
+                 except (AttributeError, IndexError, ValueError):
+                     pass
+             return my_dict
+         except Exception:
+             return None
+
+     async def search(self, query, page, limit):
+         async with aiohttp.ClientSession() as session:
+             start_time = time.time()
+             self.LIMIT = limit
+             # quote() the query so spaces and special characters survive;
+             # the site's pages are zero-indexed, hence page - 1.
+             url = (
+                 self.BASE_URL
+                 + "/search_results.php?search={}&cat=0&incldead=0&inclexternal=0&lang=0&sort=seeders&order=desc&page={}".format(
+                     quote(query), page - 1
+                 )
+             )
+             return await self.parser_result(start_time, url, session)
+
+     async def parser_result(self, start_time, url, session):
+         htmls = await Scraper().get_all_results(session, url)
+         results = self._parser(htmls)
+         if results is not None:
+             results["time"] = time.time() - start_time
+             results["total"] = len(results["data"])
+         return results
+
+     async def trending(self, category, page, limit):
+         # category and page are accepted for interface parity but the
+         # trending endpoint takes no parameters.
+         async with aiohttp.ClientSession() as session:
+             start_time = time.time()
+             self.LIMIT = limit
+             url = self.BASE_URL + "/today.php"
+             return await self.parser_result(start_time, url, session)
+
+     async def recent(self, category, page, limit):
+         # Same signature as trending(); the recent listing is a bare
+         # search.php with no query.
+         async with aiohttp.ClientSession() as session:
+             start_time = time.time()
+             self.LIMIT = limit
+             url = self.BASE_URL + "/search.php"
+             return await self.parser_result(start_time, url, session)