randydev committed on
Commit
4e213b7
·
verified ·
1 Parent(s): 1058cc1

Create torrents/bitsearch.py

Files changed (1)
  1. torrents/bitsearch.py +107 -0
torrents/bitsearch.py ADDED
@@ -0,0 +1,107 @@
+ import re
+ import time
+ import math
+ import aiohttp
+ from bs4 import BeautifulSoup
+ from helper.html_scraper import Scraper
+ from constants.base_url import BITSEARCH
+
+
+ class Bitsearch:
+     def __init__(self):
+         self.BASE_URL = BITSEARCH
+         self.LIMIT = None
+
+     def _parser(self, htmls):
+         try:
+             my_dict = {"data": []}  # accumulate results across all fetched pages
+             for html in htmls:
+                 soup = BeautifulSoup(html, "html.parser")
+
+                 for div in soup.find_all("li", class_="search-result"):
+                     info = div.find("div", class_="info")
+                     name = info.find("h5", class_="title").find("a").text
+                     url = info.find("h5", class_="title").find("a")["href"]
+                     category = info.find("div").find("a", class_="category").text
+                     if not category:
+                         continue
+                     stats = info.find("div", class_="stats").find_all("div")
+                     if stats:
+                         downloads = stats[0].text
+                         size = stats[1].text
+                         seeders = stats[2].text.strip()
+                         leechers = stats[3].text.strip()
+                         date = stats[4].text
+                         links = div.find("div", class_="links").find_all("a")
+                         magnet = links[1]["href"]
+                         torrent = links[0]["href"]
+                         my_dict["data"].append(
+                             {
+                                 "name": name,
+                                 "size": size,
+                                 "seeders": seeders,
+                                 "leechers": leechers,
+                                 "category": category,
+                                 # the info-hash is the 32-40 char hex run in the magnet link
+                                 "hash": re.search(
+                                     r"\b([a-fA-F0-9]{32,40})\b", magnet
+                                 ).group(0),
+                                 "magnet": magnet,
+                                 "torrent": torrent,
+                                 "url": self.BASE_URL + url,
+                                 "date": date,
+                                 "downloads": downloads,
+                             }
+                         )
+                     if len(my_dict["data"]) == self.LIMIT:
+                         break
+                 try:
+                     # bitsearch lists 20 results per page, so round up to get the page count
+                     total_results = int(
+                         soup.select(
+                             "body > main > div.container.mt-2 > div > div:nth-child(1) > div > span > b"
+                         )[0].text
+                     )
+                     total_pages = max(math.ceil(total_results / 20), 1)
+                     current_page = int(
+                         soup.find("div", class_="pagination")
+                         .find("a", class_="active")
+                         .text
+                     )
+                     my_dict["current_page"] = current_page
+                     my_dict["total_pages"] = total_pages
+                 except Exception:
+                     ...  # pagination block is optional; keep whatever was parsed
+             return my_dict
+         except Exception:
+             return None
+
+     async def search(self, query, page, limit):
+         async with aiohttp.ClientSession() as session:
+             start_time = time.time()
+             self.LIMIT = limit
+             url = self.BASE_URL + "/search?q={}&page={}".format(query, page)
+             return await self.parser_result(start_time, url, session)
+
+     async def parser_result(self, start_time, url, session):
+         htmls = await Scraper().get_all_results(session, url)
+         results = self._parser(htmls)
+         if results is not None:
+             results["time"] = time.time() - start_time
+             results["total"] = len(results["data"])
+         return results
+
+     async def trending(self, category, page, limit):
+         # category and page are accepted for interface parity but unused here
+         async with aiohttp.ClientSession() as session:
+             start_time = time.time()
+             self.LIMIT = limit
+             url = self.BASE_URL + "/trending"
+             return await self.parser_result(start_time, url, session)
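
For reference, a minimal driver sketch (hypothetical, not part of this commit) showing how the class added above might be invoked. It assumes the module lives at torrents/bitsearch.py as created here, and that the repo's Scraper.get_all_results returns the fetched page HTML for _parser to consume:

import asyncio

from torrents.bitsearch import Bitsearch


async def main():
    client = Bitsearch()
    # search(query, page, limit) resolves to a dict with "data", "total",
    # "time" and, when pagination parses, "current_page"/"total_pages"
    results = await client.search("ubuntu", page=1, limit=10)
    if results:
        for item in results["data"]:
            print(item["name"], item["seeders"], item["size"])


if __name__ == "__main__":
    asyncio.run(main())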