import time
import aiohttp
from bs4 import BeautifulSoup
from helper.html_scraper import Scraper
from constants.base_url import GLODLS


class Glodls:
    def __init__(self):
        self.BASE_URL = GLODLS
        self.LIMIT = None

    def _parser(self, htmls):
        # Parse each fetched results page into a dict of torrent entries.
        try:
            for html in htmls:
                soup = BeautifulSoup(html, "html.parser")

                my_dict = {"data": []}
                # Torrent entries sit on every other "t-row"; the slice steps by two
                # and skips the trailing row.
                for tr in soup.find_all("tr", class_="t-row")[0:-1:2]:
                    td = tr.find_all("td")
                    name = td[1].find_all("a")[-1].find("b").text
                    url = self.BASE_URL + td[1].find_all("a")[-1]["href"]
                    torrent = self.BASE_URL + td[2].find("a")["href"]
                    magnet = td[3].find("a")["href"]
                    size = td[4].text
                    seeders = td[5].find("font").find("b").text
                    leechers = td[6].find("font").find("b").text
                    try:
                        uploader = td[7].find("a").find("b").find("font").text
                    except Exception:
                        # Fall back to an empty string when the uploader markup is missing.
                        uploader = ""
                    my_dict["data"].append(
                        {
                            "name": name,
                            "size": size,
                            "uploader": uploader,
                            "seeders": seeders,
                            "leechers": leechers,
                            "magnet": magnet,
                            "torrent": torrent,
                            "url": self.BASE_URL + url,
                        }
                    )
                    if len(my_dict["data"]) == self.LIMIT:
                        break
                try:
                    # The second-to-last pagination link carries the last page offset;
                    # pages are zero-indexed in the query string, hence the +1.
                    pagination = soup.find("div", class_="pagination")
                    total_pages = pagination.find_all("a")[-2]["href"]
                    total_pages = total_pages.split("=")[-1]
                    my_dict["total_pages"] = int(total_pages) + 1
                except Exception:
                    ...
                return my_dict
        except Exception:
            return None

    async def search(self, query, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            # GLODLS uses zero-based paging, so shift the 1-based page argument.
            url = (
                self.BASE_URL
                + "/search_results.php?search={}&cat=0&incldead=0&inclexternal=0&lang=0&sort=seeders&order=desc&page={}".format(
                    query, page - 1
                )
            )
            return await self.parser_result(start_time, url, session)

    async def parser_result(self, start_time, url, session):
        html = await Scraper().get_all_results(session, url)
        results = self._parser(html)
        if results is not None:
            results["time"] = time.time() - start_time
            results["total"] = len(results["data"])
        return results

    async def trending(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = self.BASE_URL + "/today.php"
            return await self.parser_result(start_time, url, session)

    async def recent(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = self.BASE_URL + "/search.php"
            return await self.parser_result(start_time, url, session)
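

# --- Usage sketch (illustrative, not part of the scraper itself) ---
# A minimal example of driving the Glodls scraper when this module is run
# directly, assuming the helper/constants dependencies are importable. The
# query string, page, and limit values below are arbitrary examples.
if __name__ == "__main__":
    import asyncio

    async def _demo():
        scraper = Glodls()
        # search() takes a 1-based page number and converts it internally.
        results = await scraper.search(query="ubuntu", page=1, limit=10)
        if results:
            print(results["total"], "results in", round(results["time"], 2), "s")
            for item in results["data"][:3]:
                print(item["name"], item["size"], item["seeders"])

    asyncio.run(_demo())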