SoulofSukuna committed on
Commit e9d67ff · verified · 1 Parent(s): 775ce9c

Upload 44 files

.dockerignore ADDED
@@ -0,0 +1,7 @@
1
+ # created by virtualenv automatically
2
+ __pycache__
3
+ api-py
4
+ .env
5
+ function.*
6
+ .vscode
7
+ .github
.gitignore ADDED
@@ -0,0 +1,9 @@
1
+ # created by virtualenv automatically
2
+ __pycache__
3
+ api-py
4
+ .env
5
+ function.*
6
+ .vscode
7
+ test.py
8
+ *.log
9
+ test.json
Dockerfile ADDED
@@ -0,0 +1,17 @@
1
+ FROM rendyprojects/python:latest
2
+
3
+ WORKDIR /app/
4
+
5
+ RUN apt -qq update
6
+
7
+ COPY . .
8
+
9
+ RUN pip3 install --upgrade pip setuptools
10
+ RUN pip3 install -r requirements.txt
11
+
12
+ RUN chown -R 1000:0 .
13
+ RUN chmod 777 .
14
+ RUN chown -R 1000:0 /usr
15
+ RUN chmod 777 /usr
16
+
17
+ CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
Procfile ADDED
@@ -0,0 +1 @@
1
+ web: gunicorn -w 4 -k uvicorn.workers.UvicornWorker main:app
README.md CHANGED
@@ -1,4 +1,460 @@
1
  ---
2
- license: mit
3
- title: Tor Search Api
4
  ---
1
+ <h2 align='center'>Torrents Api ✨</h2>
2
+ <p align="center">
3
+ <a href="https://github.com/Ryuk-me"><img title="Author" src="https://img.shields.io/badge/Author-Ryuk--me-red.svg?style=for-the-badge&logo=github"></a>
4
+ </p>
5
+
6
+ <p align="center">
7
+ <a href="https://github.com/Ryuk-me"><img title="Followers" src="https://img.shields.io/github/followers/Ryuk-me?color=teal&style=flat-square"></a>
8
+ <a href="https://github.com/Ryuk-me/Torrent-Api-py/stargazers/"><img title="Stars" src="https://img.shields.io/github/stars/ryuk-me/Torrent-Api-py?color=brown&style=flat-square"></a>
9
+ <a href="https://github.com/Ryuk-me/Torrent-Api-py/network/members"><img title="Forks" src="https://img.shields.io/github/forks/ryuk-me/Torrent-Api-py?color=lightgrey&style=flat-square"></a>
10
+ <a href="https://github.com/Ryuk-me/Torrent-Api-py/issues"><img title="issues" src="https://img.shields.io/github/issues/Ryuk-me/Torrent-Api-py?style=flat-square">
11
+ </a>
12
+ <img src='https://visitor-badge.glitch.me/badge?page_id=ryuk-me.Torrent-Api-py'>
13
+ </p>
14
+
15
+ <p align="center">
16
+ <span style='font-size: 19px'>
17
+ An Unofficial API for <span style='font-weight:600;'>1337x</span>, <span style='font-weight:600;'>Piratebay</span>, <span style='font-weight:bold;'>Nyaasi</span>, <span style='font-weight:bold;'>Torlock</span>, <span style='font-weight:bold;'>Torrent Galaxy</span>, <span style='font-weight:600;'>Zooqle</span>, <span style='font-weight:600;'>Kickass</span>, <span style='font-weight:600;'>Bitsearch</span>, <span style='font-weight:600;'>MagnetDL, </span>Libgen, YTS, Limetorrent, TorrentFunk, Glodls, TorrentProject and YourBittorrent
18
+ </span>
19
+ </p>
20
+
21
+ ## Installation
22
+
23
+ ```sh
24
+
25
+ # Clone the repo
26
+ $ git clone https://github.com/Ryuk-me/Torrent-Api-py
27
+
28
+ # Go to the repository
29
+ $ cd Torrent-Api-py
30
+
31
+ # Install virtualenv
32
+ $ pip install virtualenv
33
+
34
+ # Create Virtual Env
35
+ $ py -3 -m venv api-py
36
+
37
+ # Activate Virtual Env [Windows]
38
+ $ .\api-py\Scripts\activate
39
+
40
+ # Activate Virtual Env [Linux]
41
+ $ source api-py/bin/activate
42
+
43
+ # Install Dependencies
44
+ $ pip install -r requirements.txt
45
+
46
+ # Start
47
+ $ python main.py
48
+
49
+ # (optional) To use a proxy, set the HTTP_PROXY environment variable
50
+ # You can also use a tor proxy using dperson/torproxy:latest
51
+ $ export HTTP_PROXY="http://proxy-host:proxy-port"
52
+
53
+ # To access the API, open the URL below in a browser or any API testing tool
54
+ $ localhost:8009
55
+
56
+ ```
57
+
58
+
59
  ---
60
+
61
+ ## Supported Sites
62
+
63
+ | Website | Keyword | URL | Cloudflare |
64
+ | :------------: | :--------------: | :--------------------------: | :-------: |
65
+ | 1337x | `1337x` | https://1337x.to | ❌ |
66
+ | Torrent Galaxy | `tgx` | https://torrentgalaxy.to | ❌ |
67
+ | Torlock | `torlock` | https://www.torlock.com | ❌ |
68
+ | PirateBay | `piratebay` | https://thepiratebay10.org | ❌ |
69
+ | Nyaasi | `nyaasi` | https://nyaa.si | ❌ |
70
+ | Zooqle | `zooqle` | https://zooqle.com | ❌ |
71
+ | KickAss | `kickass` | https://kickasstorrents.to | ❌ |
72
+ | Bitsearch | `bitsearch` | https://bitsearch.to | ❌ |
73
+ | MagnetDL | `magnetdl` | https://www.magnetdl.com | ✅ |
74
+ | Libgen | `libgen` | https://libgen.is | ❌ |
75
+ | YTS | `yts` | https://yts.mx | ❌ |
76
+ | Limetorrent | `limetorrent` | https://www.limetorrents.pro | ❌ |
77
+ | TorrentFunk | `torrentfunk` | https://www.torrentfunk.com | ❌ |
78
+ | Glodls | `glodls` | https://glodls.to | ❌ |
79
+ | TorrentProject | `torrentproject` | https://torrentproject2.com | ❌ |
80
+ | YourBittorrent | `ybt` | https://yourbittorrent.com | ❌ |
81
+
82
  ---
83
+
84
+ <details open>
85
+ <summary style='font-size: 20px'><span style='font-size: 25px;font-weight:bold;'>Supported Methods and categories</span></summary>
86
+
87
+ > If you want to change the default per-site limit, [visit here](https://github.com/Ryuk-me/Torrent-Api-py/blob/main/helper/is_site_available.py#L39)
88
+
89
+ <p>
90
+
91
+ ```python
92
+
93
+ {
94
+ "1337x": {
95
+ "trending_available": True,
96
+ "trending_category": True,
97
+ "search_by_category": True,
98
+ "recent_available": True,
99
+ "recent_category_available": True,
100
+ "categories": ["anime", "music", "games", "tv","apps","documentaries", "other", "xxx", "movies"],
101
+ "limit" : 100
102
+ },
103
+ "torlock": {
104
+ "trending_available": True,
105
+ "trending_category": True,
106
+ "search_by_category": False,
107
+ "recent_available": True,
108
+ "recent_category_available": True,
109
+ "categories": ["anime", "music", "games", "tv","apps", "documentaries", "other", "xxx", "movies", "books", "images"],
110
+ "limit" : 50
111
+ },
112
+ "zooqle": {
113
+ "trending_available": False,
114
+ "trending_category": False,
115
+ "search_by_category": False,
116
+ "recent_available": False,
117
+ "recent_category_available": False,
118
+ "categories": [],
119
+ "limit": 30
120
+ },
121
+ "magnetdl": {
122
+ "trending_available": False,
123
+ "trending_category": False,
124
+ "search_by_category": False,
125
+ "recent_available": True,
126
+ "recent_category_available": True,
127
+ "categories": ["apps", "movies", "music", "games", "tv", "books"],
128
+ "limit": 40
129
+ },
130
+ "tgx": {
131
+ "trending_available": True,
132
+ "trending_category": True,
133
+ "search_by_category": False,
134
+ "recent_available": True,
135
+ "recent_category_available": True,
136
+ "categories": ["anime", "music", "games", "tv",
137
+ "apps", "documentaries", "other", "xxx", "movies", "books"],
138
+ "limit": 50
139
+ },
140
+ "nyaasi": {
141
+ "trending_available": False,
142
+ "trending_category": False,
143
+ "search_by_category": False,
144
+ "recent_available": True,
145
+ "recent_category_available": False,
146
+ "categories": [],
147
+ "limit": 50
148
+
149
+ },
150
+ "piratebay": {
151
+ "trending_available": True,
152
+ "trending_category": False,
153
+ "search_by_category": False,
154
+ "recent_available": True,
155
+ "recent_category_available": True,
156
+ "categories": ["tv"],
157
+ "limit": 50
158
+ },
159
+ "bitsearch": {
160
+ "trending_available": True,
161
+ "trending_category": False,
162
+ "search_by_category": False,
163
+ "recent_available": False,
164
+ "recent_category_available": False,
165
+ "categories": [],
166
+ "limit": 50
167
+ },
168
+ "kickass": {
169
+ "trending_available": True,
170
+ "trending_category": True,
171
+ "search_by_category": False,
172
+ "recent_available": True,
173
+ "recent_category_available": True,
174
+ "categories": ["anime", "music", "games", "tv","apps", "documentaries", "other", "xxx", "movies", "books"],
175
+ "limit": 50
176
+ },
177
+ "libgen'": {
178
+ "trending_available": False,
179
+ "trending_category": False,
180
+ "search_by_category": False,
181
+ "recent_available": False,
182
+ "recent_category_available": False,
183
+ "categories": [],
184
+ "limit": 25
185
+ },
186
+ "yts": {
187
+ "trending_available": True,
188
+ "trending_category": False,
189
+ "search_by_category": False,
190
+ "recent_available": True,
191
+ "recent_category_available": False,
192
+ "categories": [],
193
+ "limit": 20
194
+ },
195
+ "limetorrent": {
196
+ "trending_available": True,
197
+ "trending_category": False,
198
+ "search_by_category": False,
199
+ "recent_available": True,
200
+ "recent_category_available": True,
201
+ "categories": ["anime", "music", "games", "tv",
202
+ "apps", "other", "movies", "books"], # applications and tv-shows
203
+ "limit": 50
204
+ },
205
+ "torrentfunk": {
206
+ "trending_available": True,
207
+ "trending_category": True,
208
+ "search_by_category": False,
209
+ "recent_available": True,
210
+ "recent_category_available": True,
211
+ "categories": ["anime", "music", "games", "tv",
212
+ "apps", "xxx", "movies", "books"], # television # software #adult # ebooks
213
+ "limit": 50
214
+ },
215
+ "glodls": {
216
+ "trending_available": True,
217
+ "trending_category": False,
218
+ "search_by_category": False,
219
+ "recent_available": True,
220
+ "recent_category_available": False,
221
+ "categories": [],
222
+ "limit": 45
223
+ },
224
+ "torrentproject": {
225
+ "trending_available": False,
226
+ "trending_category": False,
227
+ "search_by_category": False,
228
+ "recent_available": False,
229
+ "recent_category_available": False,
230
+ "categories": [],
231
+ "limit": 20
232
+ },
233
+ "ybt": {
234
+ "trending_available": True,
235
+ "trending_category": True,
236
+ "search_by_category": False,
237
+ "recent_available": True,
238
+ "recent_category_available": True,
239
+ "categories": ["anime", "music", "games", "tv",
240
+ "apps", "xxx", "movies", "books", "pictures", "other"], # book -> ebooks
241
+ "limit": 20
242
+ }
243
+
244
+ }
245
+ ```
246
+
247
+ </p>
248
+ </details>
249
+
250
+ ---
251
+
252
+ ## API Endpoints
253
+
254
+ <details open>
255
+ <summary style='font-size: 15px'><span style='font-size: 20px;font-weight:bold;'>Supported sites list</span></summary>
256
+ <p>
257
+
258
+ > [`api/v1/sites`](https://torrent-api-py-nx0x.onrender.com/api/v1/sites)
259
+
260
+ </p>
261
+ </details>
262
+ <br>
263
+
264
+ <details open>
265
+ <summary style='font-size: 15px'><span style='font-size: 20px;font-weight:bold;'>Search</span></summary>
266
+ <p>
267
+
268
+ > [`api/v1/search`](https://torrent-api-py-nx0x.onrender.com/api/v1/search)
269
+
270
+ | Parameter | Required | Type | Default | Example |
271
+ | :-------: | :------: | :-----: | :-----: | :------------------------------------------------------: |
272
+ | site | ✅ | string | None | `api/v1/search?site=1337x` |
273
+ | query | ✅ | string | None | `api/v1/search?site=1337x&query=avengers` |
274
+ | limit | ❌ | integer | Default | `api/v1/search?site=1337x&query=avengers&limit=20` |
275
+ | page | ❌ | integer | 1 | `api/v1/search?site=1337x&query=avengers&limit=0&page=2` |
276
+
277
+ </p>
278
+ </details>
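A minimal sketch of calling the search endpoint from Python, assuming the API is running locally on port 8009 as in the installation steps above:

```python
# Query /api/v1/search on a locally running instance (assumed at localhost:8009).
import requests

resp = requests.get(
    "http://localhost:8009/api/v1/search",
    params={"site": "1337x", "query": "avengers", "limit": 5},
    timeout=30,
)
resp.raise_for_status()
for torrent in resp.json()["data"]:
    print(torrent["name"], torrent["size"], torrent["seeders"])
```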
279
+ <br>
280
+
281
+ <details open>
282
+ <summary style='font-size: 15px'><span style='font-size: 20px;font-weight:bold;'>Trending</span></summary>
283
+ <p>
284
+
285
+ > `api/v1/trending`
286
+
287
+ | Parameter | Required | Type | Default | Example |
288
+ | :-------: | :------: | :-----: | :-----: | :-----------------------------------------------------: |
289
+ | site | ✅ | string | None | `api/v1/trending?site=1337x` |
290
+ | limit | ❌ | integer | Default | `api/v1/trending?site=1337x&limit=10` |
291
+ | category | ❌ | string | None | `api/v1/trending?site=1337x&limit=0&category=tv` |
292
+ | page | ❌ | integer | 1 | `api/v1/trending?site=1337x&limit=6&category=tv&page=2` |
293
+
294
+ </p>
295
+ </details>
296
+ <br>
297
+
298
+ <details open>
299
+ <summary style='font-size: 15px'><span style='font-size: 20px;font-weight:bold;'>Recent</span></summary>
300
+ <p>
301
+
302
+ > `api/v1/recent`
303
+
304
+ | Parameter | Required | Type | Default | Example |
305
+ | :-------: | :------: | :-----: | :-----: | :----------------------------------------------------: |
306
+ | site | ✅ | string | None | `api/v1/recent?site=1337x` |
307
+ | limit | ❌ | integer | Default | `api/v1/recent?site=1337x&limit=7` |
308
+ | category | ❌ | string | None | `api/v1/recent?site=1337x&limit=0&category=tv` |
309
+ | page | ❌ | integer | 1 | `api/v1/recent?site=1337x&limit=15&category=tv&page=2` |
310
+
311
+ </p>
312
+ </details>
313
+ <br>
314
+
315
+ <details open>
316
+ <summary style='font-size: 15px'><span style='font-size: 20px;font-weight:bold;'>Search By Category</span></summary>
317
+ <p>
318
+
319
+ > `api/v1/category`
320
+
321
+ | Parameter | Required | Type | Default | Example |
322
+ | :-------: | :------: | :-----: | :-----: | :--------------------------------------------------------------------: |
323
+ | site | ✅ | string | None | `api/v1/category?site=1337x` |
324
+ | query | ✅ | string | None | `api/v1/category?site=1337x&query=avengers` |
325
+ | category | ✅ | string | None | `api/v1/category?site=1337x&query=avengers&category=movies` |
326
+ | limit | ❌ | integer | Default | `api/v1/category?site=1337x&query=avengers&category=movies&limit=10` |
327
+ | page | ❌ | integer | 1 | `api/v1/category?site=1337x&query=avengers&category=tv&limit=0&page=2` |
328
+
329
+ </p>
330
+ </details>
331
+
332
+ <br>
333
+
334
+ <details open>
335
+ <summary style='font-size: 15px'><span style='font-size: 20px;font-weight:bold;'>Search from all sites</span></summary>
336
+ <p>
337
+
338
+ > `api/v1/all/search`
339
+
340
+ | Parameter | Required | Type | Default | Example |
341
+ | :-------: | :------: | :-----: | :-----: | :----------------------------------------: |
342
+ | query | ✅ | string | None | `api/v1/all/search?query=avengers` |
343
+ | limit | ❌ | integer | Default | `api/v1/all/search?query=avengers&limit=5` |
344
+
345
+ <pre>Here <b>limit = 5</b> will get 5 results from each site.</pre>
346
+
347
+ > [api/v1/all/search?query=avengers](https://torrent-api-py-nx0x.onrender.com/api/v1/all/search?query=avengers)
348
+
349
+ > [api/v1/all/search?query=avengers&limit=5](https://torrent-api-py-nx0x.onrender.com/api/v1/all/search?query=avengers&limit=5)
350
+
351
+ </p>
352
+ </details>
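Likewise, a short sketch for the combined search, again assuming a local instance on port 8009; here `limit` caps the number of results taken from each site:

```python
# Search every supported site at once via /api/v1/all/search (local instance assumed).
import requests

resp = requests.get(
    "http://localhost:8009/api/v1/all/search",
    params={"query": "avengers", "limit": 5},
    timeout=120,
)
print(resp.json()["total"], "torrents found across all sites")
```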
353
+
354
+ <br>
355
+
356
+ <details open>
357
+ <summary style='font-size: 15px'><span style='font-size: 20px;font-weight:bold;'>Get trending from all sites</span></summary>
358
+ <p>
359
+
360
+ > `api/v1/all/trending`
361
+
362
+ | Parameter | Required | Type | Default | Example |
363
+ | :-------: | :------: | :-----: | :-----: | :---------------------------: |
364
+ | limit | ❌ | integer | Default | `api/v1/all/trending?limit=2` |
365
+
366
+ > [api/v1/all/trending](https://torrent-api-py-nx0x.onrender.com/api/v1/all/trending)
367
+
368
+ > [api/v1/all/trending?limit=2](https://torrent-api-py-nx0x.onrender.com/api/v1/all/trending?limit=2)
369
+
370
+ </p>
371
+ </details>
372
+
373
+ <br>
374
+
375
+ <details open>
376
+ <summary style='font-size: 15px'><span style='font-size: 20px;font-weight:bold;'>Get recent from all sites</span></summary>
377
+ <p>
378
+
379
+ > `api/v1/all/recent`
380
+
381
+ | Parameter | Required | Type | Default | Example |
382
+ | :-------: | :------: | :-----: | :-----: | :-------------------------: |
383
+ | limit | ❌ | integer | Default | `api/v1/all/recent?limit=2` |
384
+
385
+ > [api/v1/all/recent](https://torrent-api-py-nx0x.onrender.com/api/v1/all/recent)
386
+
387
+ > [api/v1/all/recent?limit=2](https://torrent-api-py-nx0x.onrender.com/api/v1/all/recent?limit=2)
388
+
389
+ </p>
390
+ </details>
391
+
392
+ ---
393
+
394
+ ## Want to try the API?
395
+
396
+ > [api/v1/search?site=1337x&query=eternals](https://torrent-api-py-nx0x.onrender.com/api/v1/search?site=1337x&query=eternals)
397
+
398
+ <details open>
399
+ <summary> See response</summary>
400
+ <p>
401
+
402
+ ```json
403
+ {
404
+ "data": [
405
+ {
406
+ "name": "Eternals.2021.1080p.WEBRip.1600MB.DD5.1.x264-GalaxyRG",
407
+ "size": "1.6 GB",
408
+ "date": "Jan. 11th '22",
409
+ "seeders": "3674",
410
+ "leechers": "983",
411
+ "url": "https://1337x.to/torrent/5110228/Eternals-2021-1080p-WEBRip-1600MB-DD5-1-x264-GalaxyRG/",
412
+ "uploader": "TGxGoodies",
413
+ "screenshot": [
414
+ "https://everest.picturedent.org/images/2022/01/11/tmpposter23827.jpg",
415
+ "https://everest.picturedent.org/images/2022/01/11/Harone8014.th.jpg",
416
+ "https://everest.picturedent.org/images/2022/01/11/Harone31320.th.jpg",
417
+ "https://everest.picturedent.org/images/2022/01/11/Harone8129XqiKn.th.jpg",
418
+ "https://everest.picturedent.org/images/2022/01/11/Harone27162.th.jpg",
419
+ "https://everest.picturedent.org/images/2022/01/11/Harone1352.th.jpg",
420
+ "https://everest.picturedent.org/images/2022/01/11/Harone14355.th.jpg"
421
+ ],
422
+ "category": "Movies",
423
+ "files": [
424
+ "Eternals.2021.1080p.WEBRip.1600MB.DD5.1.x264-GalaxyRG.mkv (1.6 GB)",
425
+ "[TGx]Downloaded from torrentgalaxy.to .txt (0.7 KB)"
426
+ ],
427
+ "poster": "https://lx1.dyncdn.cc/cdn/02/0251ab7772c031c1130bc92810758cd4.jpg",
428
+ "magnet": "magnet:?xt=urn:btih:20F8D7C2942B143E6E2A0FB5562CDE7EE1B17822&dn=Eternals.2021.1080p.WEBRip.1600MB.DD5.1.x264-GalaxyRG&tr=udp://open.stealth.si:80/announce&tr=udp://tracker.tiny-vps.com:6969/announce&tr=udp://tracker.opentrackr.org:1337/announce&tr=udp://tracker.torrent.eu.org:451/announce&tr=udp://explodie.org:6969/announce&tr=udp://tracker.cyberia.is:6969/announce&tr=udp://ipv4.tracker.harry.lu:80/announce&tr=udp://p4p.arenabg.com:1337/announce&tr=udp://tracker.birkenwald.de:6969/announce&tr=udp://tracker.moeking.me:6969/announce&tr=udp://opentor.org:2710/announce&tr=udp://tracker.dler.org:6969/announce&tr=udp://9.rarbg.me:2970/announce&tr=https://tracker.foreverpirates.co:443/announce&tr=udp://tracker.opentrackr.org:1337/announce&tr=http://tracker.openbittorrent.com:80/announce&tr=udp://opentracker.i2p.rocks:6969/announce&tr=udp://tracker.internetwarriors.net:1337/announce&tr=udp://tracker.leechers-paradise.org:6969/announce&tr=udp://coppersurfer.tk:6969/announce&tr=udp://tracker.zer0day.to:1337/announce",
429
+ "hash": "20F8D7C2942B143E6E2A0FB5562CDE7EE1B17822"
430
+ }
431
+ ],
432
+ "current_page": 1,
433
+ "total_pages": 7,
434
+ "time": 1.276763677597046,
435
+ "total": 20
436
+ }
437
+ ```
438
+
439
+ </p>
440
+ </details>
441
+
442
+ ---
443
+
444
+ ## Donations
445
+
446
+ <p> If you feel like showing your appreciation for this project, then how about buying me a coffee?</p>
447
+
448
+ [!["Buy Me A Coffee"](https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png)](https://www.buymeacoffee.com/ryukmee)
449
+
450
+ ---
451
+
452
+ ## DEPLOY
453
+
454
+ <a href="https://render.com/deploy?repo=https://github.com/Ryuk-me/Torrent-Api-py">
455
+ <img src="https://render.com/images/deploy-to-render-button.svg" alt="Deploy to Render" />
456
+ </a>
457
+
458
+ </br>
459
+
460
+ [![Deploy](https://www.herokucdn.com/deploy/button.svg)](https://heroku.com/deploy)
app.json ADDED
@@ -0,0 +1,11 @@
1
+ {
2
+ "name": "Torrent-Api-py",
3
+ "description": "An Unofficial API for 1337x, Piratebay, Nyaasi, Torlock, Torrent Galaxy, Zooqle, Kickass, Bitsearch, MagnetDL, Libgen, YTS, TorrentFunk, Glodls TorrentProject and YourBittorrent",
4
+ "keywords": [
5
+ "fast-api",
6
+ "python",
7
+ "torrent",
8
+ "api"
9
+ ],
10
+ "repository": "https://github.com/Ryuk-me/Torrent-Api-py"
11
+ }
constants/base_url.py ADDED
@@ -0,0 +1,16 @@
1
+ X1337 = "https://1337x.to"
2
+ TGX = "https://torrentgalaxy.to"
3
+ TORLOCK = "https://www.torlock.com"
4
+ PIRATEBAY = "https://thepiratebay10.org"
5
+ NYAASI = "https://nyaa.si"
6
+ ZOOQLE = "https://zooqle.com"
7
+ KICKASS = "https://kickasstorrents.to"
8
+ BITSEARCH = "https://bitsearch.to"
9
+ MAGNETDL = "https://www.magnetdl.com"
10
+ LIBGEN = "https://libgen.is"
11
+ YTS = "https://yts.mx"
12
+ LIMETORRENT = "https://www.limetorrents.pro"
13
+ TORRENTFUNK = "https://www.torrentfunk.com"
14
+ GLODLS = "https://glodls.to"
15
+ TORRENTPROJECT = "https://torrentproject2.com"
16
+ YOURBITTORRENT = "https://yourbittorrent.com"
constants/headers.py ADDED
@@ -0,0 +1,4 @@
1
+ HEADER_AIO = {
2
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.67",
3
+ "Cookie": "fencekey=0e31613a539b90e445bbcecafaa5a273",
4
+ }
docker-compose.yml ADDED
@@ -0,0 +1,6 @@
1
+ version: '3.9'
2
+ services:
3
+ api-py:
4
+ build: .
5
+ ports:
6
+ - "8009:8009"
helper/__init__.py ADDED
File without changes
helper/asyncioPoliciesFix.py ADDED
@@ -0,0 +1,15 @@
1
+ import asyncio
2
+ import sys
3
+
4
+
5
+ def decorator_asyncio_fix(func):
6
+ def wrapper(*args):
7
+ if (
8
+ sys.version_info[0] == 3
9
+ and sys.version_info[1] >= 8
10
+ and sys.platform.startswith("win")
11
+ ):
12
+ asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
13
+ return func(*args)
14
+
15
+ return wrapper
helper/error_messages.py ADDED
@@ -0,0 +1,9 @@
1
+ from fastapi.encoders import jsonable_encoder
2
+ from fastapi.responses import JSONResponse
3
+
4
+
5
+ def error_handler(status_code, json_message):
6
+ return JSONResponse(
7
+ status_code=status_code,
8
+ content=jsonable_encoder(json_message),
9
+ )
helper/html_scraper.py ADDED
@@ -0,0 +1,19 @@
1
+ import os
2
+ import asyncio
3
+ from .asyncioPoliciesFix import decorator_asyncio_fix
4
+ from constants.headers import HEADER_AIO
5
+
6
+ HTTP_PROXY = os.environ.get("HTTP_PROXY", None)
7
+
8
+
9
+ class Scraper:
10
+ @decorator_asyncio_fix
11
+ async def _get_html(self, session, url):
12
+ try:
13
+ async with session.get(url, headers=HEADER_AIO, proxy=HTTP_PROXY) as r:
14
+ return await r.text()
15
+ except:
16
+ return None
17
+
18
+ async def get_all_results(self, session, url):
19
+ return await asyncio.gather(asyncio.create_task(self._get_html(session, url)))
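For orientation, a hedged sketch of how the torrent scraper classes added later in this commit use `Scraper`: they open an `aiohttp` session, call `get_all_results`, and get back a list holding the page HTML (or `None` on failure):

```python
# Illustrative only: fetch one page the way the site scrapers in this commit do.
import asyncio
import aiohttp
from helper.html_scraper import Scraper

async def fetch(url: str):
    async with aiohttp.ClientSession() as session:
        htmls = await Scraper().get_all_results(session, url)  # list with one HTML string, or None on error
        return htmls[0]

# Example call (hypothetical URL): asyncio.run(fetch("https://1337x.to/home/"))
```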
helper/is_site_available.py ADDED
@@ -0,0 +1,259 @@
1
+ from torrents.bitsearch import Bitsearch
2
+ from torrents.glodls import Glodls
3
+ from torrents.kickass import Kickass
4
+ from torrents.libgen import Libgen
5
+ from torrents.limetorrents import Limetorrent
6
+ from torrents.magnet_dl import Magnetdl
7
+ from torrents.nyaa_si import NyaaSi
8
+ from torrents.pirate_bay import PirateBay
9
+ from torrents.torlock import Torlock
10
+ from torrents.torrent_galaxy import TorrentGalaxy
11
+ from torrents.torrentfunk import TorrentFunk
12
+ from torrents.torrentProject import TorrentProject
13
+ from torrents.x1337 import x1337
14
+ from torrents.your_bittorrent import YourBittorrent
15
+ from torrents.yts import Yts
16
+ from torrents.zooqle import Zooqle
17
+
18
+
19
+ def check_if_site_available(site):
20
+ all_sites = {
21
+ "1337x": {
22
+ "website": x1337,
23
+ "trending_available": True,
24
+ "trending_category": True,
25
+ "search_by_category": True,
26
+ "recent_available": True,
27
+ "recent_category_available": True,
28
+ "categories": [
29
+ "anime",
30
+ "music",
31
+ "games",
32
+ "tv",
33
+ "apps",
34
+ "documentaries",
35
+ "other",
36
+ "xxx",
37
+ "movies",
38
+ ],
39
+ "limit": 100,
40
+ },
41
+ "torlock": {
42
+ "website": Torlock,
43
+ "trending_available": True,
44
+ "trending_category": True,
45
+ "search_by_category": False,
46
+ "recent_available": True,
47
+ "recent_category_available": True,
48
+ "categories": [
49
+ "anime",
50
+ "music",
51
+ "games",
52
+ "tv",
53
+ "apps",
54
+ "documentaries",
55
+ "other",
56
+ "xxx",
57
+ "movies",
58
+ "books",
59
+ "images",
60
+ ], # ebooks
61
+ "limit": 50,
62
+ },
63
+ "zooqle": {
64
+ "website": Zooqle,
65
+ "trending_available": False,
66
+ "trending_category": False,
67
+ "search_by_category": False,
68
+ "recent_available": False,
69
+ "recent_category_available": False,
70
+ "categories": [],
71
+ "limit": 30,
72
+ },
73
+ "magnetdl": {
74
+ "website": Magnetdl,
75
+ "trending_available": False,
76
+ "trending_category": False,
77
+ "search_by_category": False,
78
+ "recent_available": True,
79
+ "recent_category_available": True,
80
+ # e-books
81
+ "categories": ["apps", "movies", "music", "games", "tv", "books"],
82
+ "limit": 40,
83
+ },
84
+ "tgx": {
85
+ "website": TorrentGalaxy,
86
+ "trending_available": True,
87
+ "trending_category": True,
88
+ "search_by_category": False,
89
+ "recent_available": True,
90
+ "recent_category_available": True,
91
+ "categories": [
92
+ "anime",
93
+ "music",
94
+ "games",
95
+ "tv",
96
+ "apps",
97
+ "documentaries",
98
+ "other",
99
+ "xxx",
100
+ "movies",
101
+ "books",
102
+ ],
103
+ "limit": 50,
104
+ },
105
+ "nyaasi": {
106
+ "website": NyaaSi,
107
+ "trending_available": False,
108
+ "trending_category": False,
109
+ "search_by_category": False,
110
+ "recent_available": True,
111
+ "recent_category_available": False,
112
+ "categories": [],
113
+ "limit": 50,
114
+ },
115
+ "piratebay": {
116
+ "website": PirateBay,
117
+ "trending_available": True,
118
+ "trending_category": False,
119
+ "search_by_category": False,
120
+ "recent_available": True,
121
+ "recent_category_available": True,
122
+ "categories": ["tv"],
123
+ "limit": 50,
124
+ },
125
+ "bitsearch": {
126
+ "website": Bitsearch,
127
+ "trending_available": True,
128
+ "trending_category": False,
129
+ "search_by_category": False,
130
+ "recent_available": False,
131
+ "recent_category_available": False,
132
+ "categories": [],
133
+ "limit": 50,
134
+ },
135
+ "kickass": {
136
+ "website": Kickass,
137
+ "trending_available": True,
138
+ "trending_category": True,
139
+ "search_by_category": False,
140
+ "recent_available": True,
141
+ "recent_category_available": True,
142
+ "categories": [
143
+ "anime",
144
+ "music",
145
+ "games",
146
+ "tv",
147
+ "apps",
148
+ "documentaries",
149
+ "other",
150
+ "xxx",
151
+ "movies",
152
+ "books",
153
+ ], # television applications
154
+ "limit": 50,
155
+ },
156
+ "libgen": {
157
+ "website": Libgen,
158
+ "trending_available": False,
159
+ "trending_category": False,
160
+ "search_by_category": False,
161
+ "recent_available": False,
162
+ "recent_category_available": False,
163
+ "categories": [],
164
+ "limit": 25,
165
+ },
166
+ "yts": {
167
+ "website": Yts,
168
+ "trending_available": True,
169
+ "trending_category": False,
170
+ "search_by_category": False,
171
+ "recent_available": True,
172
+ "recent_category_available": False,
173
+ "categories": [],
174
+ "limit": 20,
175
+ },
176
+ "limetorrent": {
177
+ "website": Limetorrent,
178
+ "trending_available": True,
179
+ "trending_category": False,
180
+ "search_by_category": False,
181
+ "recent_available": True,
182
+ "recent_category_available": True,
183
+ "categories": [
184
+ "anime",
185
+ "music",
186
+ "games",
187
+ "tv",
188
+ "apps",
189
+ "other",
190
+ "movies",
191
+ "books",
192
+ ], # applications and tv-shows
193
+ "limit": 50,
194
+ },
195
+ "torrentfunk": {
196
+ "website": TorrentFunk,
197
+ "trending_available": True,
198
+ "trending_category": True,
199
+ "search_by_category": False,
200
+ "recent_available": True,
201
+ "recent_category_available": True,
202
+ "categories": [
203
+ "anime",
204
+ "music",
205
+ "games",
206
+ "tv",
207
+ "apps",
208
+ "xxx",
209
+ "movies",
210
+ "books",
211
+ ], # television # software #adult # ebooks
212
+ "limit": 50,
213
+ },
214
+ "glodls": {
215
+ "website": Glodls,
216
+ "trending_available": True,
217
+ "trending_category": False,
218
+ "search_by_category": False,
219
+ "recent_available": True,
220
+ "recent_category_available": False,
221
+ "categories": [],
222
+ "limit": 45,
223
+ },
224
+ "torrentproject": {
225
+ "website": TorrentProject,
226
+ "trending_available": False,
227
+ "trending_category": False,
228
+ "search_by_category": False,
229
+ "recent_available": False,
230
+ "recent_category_available": False,
231
+ "categories": [],
232
+ "limit": 20,
233
+ },
234
+ "ybt": {
235
+ "website": YourBittorrent,
236
+ "trending_available": True,
237
+ "trending_category": True,
238
+ "search_by_category": False,
239
+ "recent_available": True,
240
+ "recent_category_available": True,
241
+ "categories": [
242
+ "anime",
243
+ "music",
244
+ "games",
245
+ "tv",
246
+ "apps",
247
+ "xxx",
248
+ "movies",
249
+ "books",
250
+ "pictures",
251
+ "other",
252
+ ], # book -> ebooks
253
+ "limit": 20,
254
+ },
255
+ }
256
+
257
+ if site in all_sites.keys():
258
+ return all_sites
259
+ return False
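A small usage sketch, mirroring how the routers below consume this helper: the returned mapping supplies both the per-site result limit and the scraper class to instantiate:

```python
# Illustrative only: look up a site's config the way the routers do.
from helper.is_site_available import check_if_site_available

all_sites = check_if_site_available("1337x")
if all_sites:
    conf = all_sites["1337x"]
    print(conf["limit"])          # 100, the default result cap for 1337x
    scraper = conf["website"]()   # an x1337 instance with .search / .trending / .recent
```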
helper/uptime.py ADDED
@@ -0,0 +1,8 @@
1
+ import time
2
+
3
+
4
+ def getUptime(startTime: float) -> float:
5
+ """
6
+ Returns the number of seconds since the program started.
7
+ """
8
+ return time.time() - startTime
main.py ADDED
@@ -0,0 +1,71 @@
1
+ import uvicorn
2
+ from fastapi import FastAPI, Request
3
+ from fastapi.responses import JSONResponse
4
+ from fastapi.middleware.cors import CORSMiddleware
5
+ from routers.v1.search_router import router as search_router
6
+ from routers.v1.trending_router import router as trending_router
7
+ from routers.v1.catergory_router import router as category_router
8
+ from routers.v1.recent_router import router as recent_router
9
+ from routers.v1.combo_routers import router as combo_router
10
+ from routers.v1.sites_list_router import router as site_list_router
11
+ from routers.home_router import router as home_router
12
+ from routers.v1.search_url_router import router as search_url_router
13
+ from helper.uptime import getUptime
14
+ from mangum import Mangum
15
+ from math import ceil
16
+ import time
17
+
18
+ startTime = time.time()
19
+
20
+ app = FastAPI(
21
+ title="Torrent-Api-Py",
22
+ version="1.0.1",
23
+ description=f"Unofficial Torrent-Api",
24
+ docs_url="/docs",
25
+ contact={
26
+ "name": "Neeraj Kumar",
27
+ "url": "https://github.com/ryuk-me",
28
+ "email": "[email protected]",
29
+ },
30
+ )
31
+
32
+ origins = ["*"]
33
+
34
+ app.add_middleware(
35
+ CORSMiddleware,
36
+ allow_origins=origins,
37
+ allow_credentials=True,
38
+ allow_methods=["*"],
39
+ allow_headers=["*"],
40
+ )
41
+
42
+
43
+ @app.get("/health")
44
+ async def health_route(req: Request):
45
+ """
46
+ Health Route : Returns App details.
47
+
48
+ """
49
+ return JSONResponse(
50
+ {
51
+ "app": "Torrent-Api-Py",
52
+ "version": "v" + "1.0.1",
53
+ "ip": req.client.host,
54
+ "uptime": ceil(getUptime(startTime)),
55
+ }
56
+ )
57
+
58
+
59
+ app.include_router(search_router, prefix="/api/v1/search")
60
+ app.include_router(trending_router, prefix="/api/v1/trending")
61
+ app.include_router(category_router, prefix="/api/v1/category")
62
+ app.include_router(recent_router, prefix="/api/v1/recent")
63
+ app.include_router(combo_router, prefix="/api/v1/all")
64
+ app.include_router(site_list_router, prefix="/api/v1/sites")
65
+ app.include_router(search_url_router, prefix="/api/v1/search_url")
66
+ app.include_router(home_router, prefix="")
67
+
68
+ handler = Mangum(app)
69
+
70
+ if __name__ == "__main__":
71
+ uvicorn.run(app, host="0.0.0.0", port=8009)
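A quick way to verify a local run (assuming `python main.py`, which binds uvicorn to port 8009) is to probe the health route:

```python
# Illustrative only: check the /health route of a locally running instance.
import requests

info = requests.get("http://localhost:8009/health", timeout=10).json()
print(info["app"], info["version"], "- uptime:", info["uptime"], "seconds")
```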
requirements.txt ADDED
@@ -0,0 +1,8 @@
1
+ aiohttp[speedups]
2
+ beautifulsoup4
3
+ cloudscraper
4
+ fastapi==0.104.1
5
+ gunicorn
6
+ mangum
7
+ requests
8
+ uvicorn[standard]
routers/__init__.py ADDED
File without changes
routers/home_router.py ADDED
@@ -0,0 +1,10 @@
1
+ from fastapi import APIRouter
2
+ from fastapi.responses import FileResponse
3
+
4
+
5
+ router = APIRouter(tags=["Home Route"])
6
+
7
+
8
+ @router.get("/")
9
+ async def home():
10
+ return FileResponse("README.md")
routers/v1/__init__.py ADDED
File without changes
routers/v1/catergory_router.py ADDED
@@ -0,0 +1,66 @@
1
+ from fastapi import APIRouter
2
+ from fastapi import status
3
+ from typing import Optional
4
+ from helper.is_site_available import check_if_site_available
5
+ from helper.error_messages import error_handler
6
+
7
+ router = APIRouter(tags=["Category Torrents Route"])
8
+
9
+
10
+ @router.get("/")
11
+ @router.get("")
12
+ async def get_category(
13
+ site: str,
14
+ query: str,
15
+ category: str,
16
+ limit: Optional[int] = 0,
17
+ page: Optional[int] = 1,
18
+ ):
19
+ all_sites = check_if_site_available(site)
20
+ site = site.lower()
21
+ query = query.lower()
22
+ category = category.lower()
23
+ if all_sites:
24
+ limit = (
25
+ all_sites[site]["limit"]
26
+ if limit == 0 or limit > all_sites[site]["limit"]
27
+ else limit
28
+ )
29
+
30
+ if all_sites[site]["search_by_category"]:
31
+ if category not in all_sites[site]["categories"]:
32
+ return error_handler(
33
+ status_code=status.HTTP_404_NOT_FOUND,
34
+ json_message={
35
+ "error": "Selected category not available.",
36
+ "available_categories": all_sites[site]["categories"],
37
+ },
38
+ )
39
+ resp = await all_sites[site]["website"]().search_by_category(
40
+ query, category, page, limit
41
+ )
42
+ if resp is None:
43
+ return error_handler(
44
+ status_code=status.HTTP_403_FORBIDDEN,
45
+ json_message={
46
+ "error": "Website Blocked Change IP or Website Domain."
47
+ },
48
+ )
49
+ elif len(resp["data"]) > 0:
50
+ return resp
51
+ else:
52
+ return error_handler(
53
+ status_code=status.HTTP_404_NOT_FOUND,
54
+ json_message={"error": "Result not found."},
55
+ )
56
+ else:
57
+ return error_handler(
58
+ status_code=status.HTTP_404_NOT_FOUND,
59
+ json_message={
60
+ "error": "Category search not availabe for {}.".format(site)
61
+ },
62
+ )
63
+ return error_handler(
64
+ status_code=status.HTTP_404_NOT_FOUND,
65
+ json_message={"error": "Selected Site Not Available"},
66
+ )
routers/v1/combo_routers.py ADDED
@@ -0,0 +1,127 @@
1
+ from fastapi import APIRouter, status
2
+ from typing import Optional
3
+ from helper.is_site_available import check_if_site_available
4
+ import time
5
+ import asyncio
6
+ from helper.error_messages import error_handler
7
+
8
+
9
+ router = APIRouter(tags=["Combo Routes"])
10
+
11
+
12
+ @router.get("/search")
13
+ async def get_search_combo(query: str, limit: Optional[int] = 0):
14
+ start_time = time.time()
15
+ query = query.lower()
16
+ all_sites = check_if_site_available("1337x")
17
+ sites_list = list(all_sites.keys())
18
+ tasks = []
19
+ COMBO = {"data": []}
20
+ total_torrents_overall = 0
21
+ for site in sites_list:
22
+ limit = (
23
+ all_sites[site]["limit"]
24
+ if limit == 0 or limit > all_sites[site]["limit"]
25
+ else limit
26
+ )
27
+ tasks.append(
28
+ asyncio.create_task(
29
+ all_sites[site]["website"]().search(query, page=1, limit=limit)
30
+ )
31
+ )
32
+ results = await asyncio.gather(*tasks)
33
+ for res in results:
34
+ if res is not None and len(res["data"]) > 0:
35
+ for torrent in res["data"]:
36
+ COMBO["data"].append(torrent)
37
+ total_torrents_overall = total_torrents_overall + res["total"]
38
+ COMBO["time"] = time.time() - start_time
39
+ COMBO["total"] = total_torrents_overall
40
+ if total_torrents_overall == 0:
41
+ return error_handler(
42
+ status_code=status.HTTP_404_NOT_FOUND,
43
+ json_message={"error": "Result not found."},
44
+ )
45
+ return COMBO
46
+
47
+
48
+ @router.get("/trending")
49
+ async def get_all_trending(limit: Optional[int] = 0):
50
+ start_time = time.time()
51
+ # * just getting all_sites dictionary
52
+ all_sites = check_if_site_available("1337x")
53
+ sites_list = [
54
+ site
55
+ for site in all_sites.keys()
56
+ if all_sites[site]["trending_available"] and all_sites[site]["website"]
57
+ ]
58
+ tasks = []
59
+ COMBO = {"data": []}
60
+ total_torrents_overall = 0
61
+ for site in sites_list:
62
+ limit = (
63
+ all_sites[site]["limit"]
64
+ if limit == 0 or limit > all_sites[site]["limit"]
65
+ else limit
66
+ )
67
+ tasks.append(
68
+ asyncio.create_task(
69
+ all_sites[site]["website"]().trending(
70
+ category=None, page=1, limit=limit
71
+ )
72
+ )
73
+ )
74
+ results = await asyncio.gather(*tasks)
75
+ for res in results:
76
+ if res is not None and len(res["data"]) > 0:
77
+ for torrent in res["data"]:
78
+ COMBO["data"].append(torrent)
79
+ total_torrents_overall = total_torrents_overall + res["total"]
80
+ COMBO["time"] = time.time() - start_time
81
+ COMBO["total"] = total_torrents_overall
82
+ if total_torrents_overall == 0:
83
+ return error_handler(
84
+ status_code=status.HTTP_404_NOT_FOUND,
85
+ json_message={"error": "Result not found."},
86
+ )
87
+ return COMBO
88
+
89
+
90
+ @router.get("/recent")
91
+ async def get_all_recent(limit: Optional[int] = 0):
92
+ start_time = time.time()
93
+ # just getting all_sites dictionary
94
+ all_sites = check_if_site_available("1337x")
95
+ sites_list = [
96
+ site
97
+ for site in all_sites.keys()
98
+ if all_sites[site]["recent_available"] and all_sites[site]["website"]
99
+ ]
100
+ tasks = []
101
+ COMBO = {"data": []}
102
+ total_torrents_overall = 0
103
+ for site in sites_list:
104
+ limit = (
105
+ all_sites[site]["limit"]
106
+ if limit == 0 or limit > all_sites[site]["limit"]
107
+ else limit
108
+ )
109
+ tasks.append(
110
+ asyncio.create_task(
111
+ all_sites[site]["website"]().recent(category=None, page=1, limit=limit)
112
+ )
113
+ )
114
+ results = await asyncio.gather(*tasks)
115
+ for res in results:
116
+ if res is not None and len(res["data"]) > 0:
117
+ for torrent in res["data"]:
118
+ COMBO["data"].append(torrent)
119
+ total_torrents_overall = total_torrents_overall + res["total"]
120
+ COMBO["time"] = time.time() - start_time
121
+ COMBO["total"] = total_torrents_overall
122
+ if total_torrents_overall == 0:
123
+ return error_handler(
124
+ status_code=status.HTTP_404_NOT_FOUND,
125
+ json_message={"error": "Result not found."},
126
+ )
127
+ return COMBO
routers/v1/recent_router.py ADDED
@@ -0,0 +1,74 @@
1
+ from fastapi import APIRouter
2
+ from fastapi import status
3
+ from typing import Optional
4
+ from helper.is_site_available import check_if_site_available
5
+ from helper.error_messages import error_handler
6
+
7
+ router = APIRouter(tags=["Recent Torrents Route"])
8
+
9
+
10
+ @router.get("/")
11
+ @router.get("")
12
+ async def get_recent(
13
+ site: str,
14
+ limit: Optional[int] = 0,
15
+ category: Optional[str] = None,
16
+ page: Optional[int] = 1,
17
+ ):
18
+ all_sites = check_if_site_available(site)
19
+ site = site.lower()
20
+ category = category.lower() if category is not None else None
21
+ if all_sites:
22
+ limit = (
23
+ all_sites[site]["limit"]
24
+ if limit == 0 or limit > all_sites[site]["limit"]
25
+ else limit
26
+ )
27
+ if all_sites[site]["recent_available"]:
28
+ if (
29
+ category is not None
30
+ and not all_sites[site]["recent_category_available"]
31
+ ):
32
+ return error_handler(
33
+ status_code=status.HTTP_404_NOT_FOUND,
34
+ json_message={
35
+ "error": "Search by Recent category not available for {}.".format(
36
+ site
37
+ )
38
+ },
39
+ )
40
+ if category is not None and category not in all_sites[site]["categories"]:
41
+ return error_handler(
42
+ status_code=status.HTTP_404_NOT_FOUND,
43
+ json_message={
44
+ "error": "Selected category not available.",
45
+ "available_categories": all_sites[site]["categories"],
46
+ },
47
+ )
48
+ resp = await all_sites[site]["website"]().recent(category, page, limit)
49
+ if resp is None:
50
+ return error_handler(
51
+ status_code=status.HTTP_403_FORBIDDEN,
52
+ json_message={
53
+ "error": "Website Blocked Change IP or Website Domain."
54
+ },
55
+ )
56
+
57
+ elif len(resp["data"]) > 0:
58
+ return resp
59
+ else:
60
+ return error_handler(
61
+ status_code=status.HTTP_404_NOT_FOUND,
62
+ json_message={"error": "Result not found."},
63
+ )
64
+ else:
65
+ return error_handler(
66
+ status_code=status.HTTP_404_NOT_FOUND,
67
+ json_message={
68
+ "error": "Recent search not availabe for {}.".format(site)
69
+ },
70
+ )
71
+ return error_handler(
72
+ status_code=status.HTTP_404_NOT_FOUND,
73
+ json_message={"error": "Selected Site Not Available"},
74
+ )
routers/v1/search_router.py ADDED
@@ -0,0 +1,42 @@
1
+ from fastapi import APIRouter
2
+ from typing import Optional
3
+ from helper.is_site_available import check_if_site_available
4
+ from fastapi import status
5
+ from helper.error_messages import error_handler
6
+
7
+ router = APIRouter(tags=["Search"])
8
+
9
+
10
+ @router.get("/")
11
+ @router.get("")
12
+ async def search_for_torrents(
13
+ site: str, query: str, limit: Optional[int] = 0, page: Optional[int] = 1
14
+ ):
15
+ site = site.lower()
16
+ query = query.lower()
17
+ all_sites = check_if_site_available(site)
18
+ if all_sites:
19
+ limit = (
20
+ all_sites[site]["limit"]
21
+ if limit == 0 or limit > all_sites[site]["limit"]
22
+ else limit
23
+ )
24
+
25
+ resp = await all_sites[site]["website"]().search(query, page, limit)
26
+ if resp is None:
27
+ return error_handler(
28
+ status_code=status.HTTP_403_FORBIDDEN,
29
+ json_message={"error": "Website Blocked Change IP or Website Domain."},
30
+ )
31
+ elif len(resp["data"]) > 0:
32
+ return resp
33
+ else:
34
+ return error_handler(
35
+ status_code=status.HTTP_404_NOT_FOUND,
36
+ json_message={"error": "Result not found."},
37
+ )
38
+
39
+ return error_handler(
40
+ status_code=status.HTTP_404_NOT_FOUND,
41
+ json_message={"error": "Selected Site Not Available"},
42
+ )
routers/v1/search_url_router.py ADDED
1
+ from fastapi import APIRouter, status
2
+ from helper.is_site_available import check_if_site_available
3
+ from helper.error_messages import error_handler
4
+
5
+ router = APIRouter(tags=["Torrent By Url"])
6
+
7
+
8
+ # * Only supports 1337x AS OF NOW
9
+ @router.get("/")
10
+ @router.get("")
11
+ async def get_torrent_from_url(site: str, url: str):
12
+ site = site.lower()
13
+ all_sites = check_if_site_available(site)
14
+ if all_sites:
15
+ resp = await all_sites[site]["website"]().get_torrent_by_url(url)
16
+ if resp is None:
17
+ return error_handler(
18
+ status_code=status.HTTP_403_FORBIDDEN,
19
+ json_message={"error": "Website Blocked Change IP or Website Domain."},
20
+ )
21
+ elif len(resp["data"]) > 0:
22
+ return resp
23
+ else:
24
+ return error_handler(
25
+ status_code=status.HTTP_404_NOT_FOUND,
26
+ json_message={"error": "Result not found."},
27
+ )
28
+ return error_handler(
29
+ status_code=status.HTTP_404_NOT_FOUND,
30
+ json_message={"error": "Selected Site Not Available"},
31
+ )
routers/v1/sites_list_router.py ADDED
@@ -0,0 +1,18 @@
1
+ from fastapi import APIRouter, status
2
+ from helper.is_site_available import check_if_site_available
3
+ from helper.error_messages import error_handler
4
+
5
+ router = APIRouter(tags=["Get all sites"])
6
+
7
+
8
+ @router.get("/")
9
+ @router.get("")
10
+ async def get_all_supported_sites():
11
+ all_sites = check_if_site_available("1337x")
12
+ sites_list = [site for site in all_sites.keys() if all_sites[site]["website"]]
13
+ return error_handler(
14
+ status_code=status.HTTP_200_OK,
15
+ json_message={
16
+ "supported_sites": sites_list,
17
+ },
18
+ )
routers/v1/trending_router.py ADDED
@@ -0,0 +1,70 @@
1
+ from fastapi import APIRouter
2
+ from fastapi import status
3
+ from typing import Optional
4
+ from helper.is_site_available import check_if_site_available
5
+ from helper.error_messages import error_handler
6
+
7
+ router = APIRouter(tags=["Trending Torrents"])
8
+
9
+
10
+ @router.get("/")
11
+ @router.get("")
12
+ async def get_trending(
13
+ site: str,
14
+ limit: Optional[int] = 0,
15
+ category: Optional[str] = None,
16
+ page: Optional[int] = 1,
17
+ ):
18
+ site = site.lower()
19
+ all_sites = check_if_site_available(site)
20
+ category = category.lower() if category is not None else None
21
+ if all_sites:
22
+ limit = (
23
+ all_sites[site]["limit"]
24
+ if limit == 0 or limit > all_sites[site]["limit"]
25
+ else limit
26
+ )
27
+ if all_sites[site]["trending_available"]:
28
+ if category is not None and not all_sites[site]["trending_category"]:
29
+ return error_handler(
30
+ status_code=status.HTTP_404_NOT_FOUND,
31
+ json_message={
32
+ "error": "Search by trending category not available for {}.".format(
33
+ site
34
+ )
35
+ },
36
+ )
37
+ if category is not None and category not in all_sites[site]["categories"]:
38
+ return error_handler(
39
+ status_code=status.HTTP_404_NOT_FOUND,
40
+ json_message={
41
+ "error": "Selected category not available.",
42
+ "available_categories": all_sites[site]["categories"],
43
+ },
44
+ )
45
+ resp = await all_sites[site]["website"]().trending(category, page, limit)
46
+ if resp is None:
47
+ return error_handler(
48
+ status_code=status.HTTP_403_FORBIDDEN,
49
+ json_message={
50
+ "error": "Website Blocked Change IP or Website Domain."
51
+ },
52
+ )
53
+ elif len(resp["data"]) > 0:
54
+ return resp
55
+ else:
56
+ return error_handler(
57
+ status_code=status.HTTP_404_NOT_FOUND,
58
+ json_message={"error": "Result not found."},
59
+ )
60
+ else:
61
+ return error_handler(
62
+ status_code=status.HTTP_404_NOT_FOUND,
63
+ json_message={
64
+ "error": "Trending search not availabe for {}.".format(site)
65
+ },
66
+ )
67
+ return error_handler(
68
+ status_code=status.HTTP_404_NOT_FOUND,
69
+ json_message={"error": "Selected Site Not Available"},
70
+ )
torrents/__init__.py ADDED
File without changes
torrents/bitsearch.py ADDED
@@ -0,0 +1,107 @@
1
+ import re
2
+ import time
3
+ import aiohttp
4
+ from bs4 import BeautifulSoup
5
+ from helper.html_scraper import Scraper
6
+ from constants.base_url import BITSEARCH
7
+
8
+
9
+ class Bitsearch:
10
+ def __init__(self):
11
+ self.BASE_URL = BITSEARCH
12
+ self.LIMIT = None
13
+
14
+ def _parser(self, htmls):
15
+ try:
16
+ for html in htmls:
17
+ soup = BeautifulSoup(html, "html.parser")
18
+
19
+ my_dict = {"data": []}
20
+ for divs in soup.find_all("li", class_="search-result"):
21
+ info = divs.find("div", class_="info")
22
+ name = info.find("h5", class_="title").find("a").text
23
+ url = info.find("h5", class_="title").find("a")["href"]
24
+ category = info.find("div").find("a", class_="category").text
25
+ if not category:
26
+ continue
27
+ stats = info.find("div", class_="stats").find_all("div")
28
+ if stats:
29
+ downloads = stats[0].text
30
+ size = stats[1].text
31
+ seeders = stats[2].text.strip()
32
+ leechers = stats[3].text.strip()
33
+ date = stats[4].text
34
+ links = divs.find("div", class_="links").find_all("a")
35
+ magnet = links[1]["href"]
36
+ torrent = links[0]["href"]
37
+ my_dict["data"].append(
38
+ {
39
+ "name": name,
40
+ "size": size,
41
+ "seeders": seeders,
42
+ "leechers": leechers,
43
+ "category": category,
44
+ "hash": re.search(
45
+ r"([{a-f\d,A-F\d}]{32,40})\b", magnet
46
+ ).group(0),
47
+ "magnet": magnet,
48
+ "torrent": torrent,
49
+ "url": self.BASE_URL + url,
50
+ "date": date,
51
+ "downloads": downloads,
52
+ }
53
+ )
54
+ if len(my_dict["data"]) == self.LIMIT:
55
+ break
56
+ try:
57
+ total_pages = (
58
+ int(
59
+ soup.select(
60
+ "body > main > div.container.mt-2 > div > div:nth-child(1) > div > span > b"
61
+ )[0].text
62
+ )
63
+ / 20
64
+ ) # !20 search result available on each page
65
+ total_pages = (
66
+ total_pages + 1
67
+ if type(total_pages) == float
68
+ else total_pages
69
+ if int(total_pages) > 0
70
+ else total_pages + 1
71
+ )
72
+
73
+ current_page = int(
74
+ soup.find("div", class_="pagination")
75
+ .find("a", class_="active")
76
+ .text
77
+ )
78
+ my_dict["current_page"] = current_page
79
+ my_dict["total_pages"] = int(total_pages)
80
+ except:
81
+ ...
82
+ return my_dict
83
+ except:
84
+ return None
85
+
86
+ async def search(self, query, page, limit):
87
+ async with aiohttp.ClientSession() as session:
88
+ start_time = time.time()
89
+ self.LIMIT = limit
90
+ url = self.BASE_URL + "/search?q={}&page={}".format(query, page)
91
+ return await self.parser_result(start_time, url, session)
92
+
93
+ async def parser_result(self, start_time, url, session):
94
+ html = await Scraper().get_all_results(session, url)
95
+ results = self._parser(html)
96
+ if results is not None:
97
+ results["time"] = time.time() - start_time
98
+ results["total"] = len(results["data"])
99
+ return results
100
+ return results
101
+
102
+ async def trending(self, category, page, limit):
103
+ async with aiohttp.ClientSession() as session:
104
+ start_time = time.time()
105
+ self.LIMIT = limit
106
+ url = self.BASE_URL + "/trending"
107
+ return await self.parser_result(start_time, url, session)
torrents/glodls.py ADDED
@@ -0,0 +1,90 @@
1
+ import time
2
+ import aiohttp
3
+ from bs4 import BeautifulSoup
4
+ from helper.html_scraper import Scraper
5
+ from constants.base_url import GLODLS
6
+
7
+
8
+ class Glodls:
9
+ def __init__(self):
10
+ self.BASE_URL = GLODLS
11
+ self.LIMIT = None
12
+
13
+ def _parser(self, htmls):
14
+ try:
15
+ for html in htmls:
16
+ soup = BeautifulSoup(html, "html.parser")
17
+
18
+ my_dict = {"data": []}
19
+ for tr in soup.find_all("tr", class_="t-row")[0:-1:2]:
20
+ td = tr.find_all("td")
21
+ name = td[1].find_all("a")[-1].find("b").text
22
+ url = self.BASE_URL + td[1].find_all("a")[-1]["href"]
23
+ torrent = self.BASE_URL + td[2].find("a")["href"]
24
+ magnet = td[3].find("a")["href"]
25
+ size = td[4].text
26
+ seeders = td[5].find("font").find("b").text
27
+ leechers = td[6].find("font").find("b").text
28
+ try:
29
+ uploader = td[7].find("a").find("b").find("font").text
30
+ except:
31
+ uploader = ""
32
+ my_dict["data"].append(
33
+ {
34
+ "name": name,
35
+ "size": size,
36
+ "uploader": uploader,
37
+ "seeders": seeders,
38
+ "leechers": leechers,
39
+ "magnet": magnet,
40
+ "torrent": torrent,
41
+ "url": self.BASE_URL + url,
42
+ }
43
+ )
44
+ if len(my_dict["data"]) == self.LIMIT:
45
+ break
46
+ try:
47
+ pagination = soup.find("div", class_="pagination")
48
+ total_pages = pagination.find_all("a")[-2]["href"]
49
+ total_pages = total_pages.split("=")[-1]
50
+ my_dict["total_pages"] = int(total_pages) + 1
51
+ except:
52
+ ...
53
+ return my_dict
54
+ except:
55
+ return None
56
+
57
+ async def search(self, query, page, limit):
58
+ async with aiohttp.ClientSession() as session:
59
+ start_time = time.time()
60
+ self.LIMIT = limit
61
+ url = (
62
+ self.BASE_URL
63
+ + "/search_results.php?search={}&cat=0&incldead=0&inclexternal=0&lang=0&sort=seeders&order=desc&page={}".format(
64
+ query, page - 1
65
+ )
66
+ )
67
+ return await self.parser_result(start_time, url, session)
68
+
69
+ async def parser_result(self, start_time, url, session):
70
+ html = await Scraper().get_all_results(session, url)
71
+ results = self._parser(html)
72
+ if results is not None:
73
+ results["time"] = time.time() - start_time
74
+ results["total"] = len(results["data"])
75
+ return results
76
+ return results
77
+
78
+ async def trending(self, category, page, limit):
79
+ async with aiohttp.ClientSession() as session:
80
+ start_time = time.time()
81
+ self.LIMIT = limit
82
+ url = self.BASE_URL + "/today.php"
83
+ return await self.parser_result(start_time, url, session)
84
+
85
+ async def recent(self, category, page, limit):
86
+ async with aiohttp.ClientSession() as session:
87
+ start_time = time.time()
88
+ self.LIMIT = limit
89
+ url = self.BASE_URL + "/search.php"
90
+ return await self.parser_result(start_time, url, session)
torrents/kickass.py ADDED
@@ -0,0 +1,139 @@
import asyncio
import re
import time
import aiohttp
from bs4 import BeautifulSoup
from helper.asyncioPoliciesFix import decorator_asyncio_fix
from helper.html_scraper import Scraper
from constants.base_url import KICKASS
from constants.headers import HEADER_AIO


class Kickass:
    def __init__(self):
        self.BASE_URL = KICKASS
        self.LIMIT = None

    @decorator_asyncio_fix
    async def _individual_scrap(self, session, url, obj):
        try:
            async with session.get(url, headers=HEADER_AIO) as res:
                html = await res.text(encoding="ISO-8859-1")
                soup = BeautifulSoup(html, "html.parser")
                try:
                    poster = soup.find("a", class_="movieCover")
                    if poster:
                        poster = poster.find("img")["src"]
                        obj["poster"] = self.BASE_URL + poster
                    imgs = (soup.find("div", class_="data")).find_all("img")
                    if imgs and len(imgs) > 0:
                        obj["screenshot"] = [img["src"] for img in imgs]
                    magnet_and_torrent = soup.find_all("a", class_="kaGiantButton")
                    magnet = magnet_and_torrent[0]["href"]
                    obj["hash"] = re.search(
                        r"([{a-f\d,A-F\d}]{32,40})\b", magnet
                    ).group(0)
                    obj["magnet"] = magnet
                except:
                    ...
        except:
            return None

    async def _get_torrent(self, result, session, urls):
        tasks = []
        for idx, url in enumerate(urls):
            for obj in result["data"]:
                if obj["url"] == url:
                    task = asyncio.create_task(
                        self._individual_scrap(session, url, result["data"][idx])
                    )
                    tasks.append(task)
        await asyncio.gather(*tasks)
        return result

    def _parser(self, htmls):
        try:
            for html in htmls:
                soup = BeautifulSoup(html, "html.parser")
                list_of_urls = []
                my_dict = {"data": []}
                for tr in soup.select("tr.odd,tr.even"):
                    td = tr.find_all("td")
                    name = tr.find("a", class_="cellMainLink").text.strip()
                    url = self.BASE_URL + tr.find("a", class_="cellMainLink")["href"]
                    list_of_urls.append(url)
                    if name:
                        size = td[1].text.strip()
                        seeders = td[4].text.strip()
                        leechers = td[5].text.strip()
                        uploader = td[2].text.strip()
                        date = td[3].text.strip()

                        my_dict["data"].append(
                            {
                                "name": name,
                                "size": size,
                                "date": date,
                                "seeders": seeders,
                                "leechers": leechers,
                                "url": url,
                                "uploader": uploader,
                            }
                        )
                    if len(my_dict["data"]) == self.LIMIT:
                        break
                try:
                    pages = soup.find("div", class_="pages")
                    current_page = int(pages.find("a", class_="active").text)
                    pages = pages.find_all("a")
                    total_page = pages[-1].text
                    if total_page == ">>":
                        total_page = pages[-2].text
                    my_dict["current_page"] = current_page
                    my_dict["total_pages"] = int(total_page)
                except:
                    ...
                return my_dict, list_of_urls
        except:
            return None, None

    async def search(self, query, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = self.BASE_URL + "/usearch/{}/{}/".format(query, page)
            return await self.parser_result(start_time, url, session)

    async def parser_result(self, start_time, url, session):
        htmls = await Scraper().get_all_results(session, url)
        result, urls = self._parser(htmls)
        if result is not None:
            results = await self._get_torrent(result, session, urls)
            results["time"] = time.time() - start_time
            results["total"] = len(results["data"])
            return results
        return result

    async def trending(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            if not category:
                url = self.BASE_URL + "/top-100"
            else:
                if category == "tv":
                    category = "television"
                elif category == "apps":
                    category = "applications"
                url = self.BASE_URL + "/top-100-{}/".format(category)
            return await self.parser_result(start_time, url, session)

    async def recent(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            if not category:
                url = self.BASE_URL + "/new/"
            else:
                url = self.BASE_URL + "/{}/".format(category)
            return await self.parser_result(start_time, url, session)
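Each scraper in this upload exposes the same coroutine surface (`search`, plus `trending` / `recent` where the site supports it), and each returns a dict with `data`, `total` and `time` keys, or `None` when parsing fails. A minimal usage sketch, calling a class directly outside the FastAPI app; it assumes you run it from the project root so that `torrents`, `helper` and `constants` are importable:

```python
import asyncio

from torrents.kickass import Kickass


async def main():
    # search(query, page, limit) -> {"data": [...], "total": n, "time": secs} or None
    results = await Kickass().search("ubuntu", page=1, limit=5)
    if results:
        for torrent in results["data"]:
            print(torrent["name"], torrent["seeders"])


if __name__ == "__main__":
    asyncio.run(main())
```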
torrents/libgen.py ADDED
@@ -0,0 +1,120 @@
import asyncio
import time
import aiohttp
from bs4 import BeautifulSoup
from helper.asyncioPoliciesFix import decorator_asyncio_fix
from helper.html_scraper import Scraper
from constants.base_url import LIBGEN
from constants.headers import HEADER_AIO


class Libgen:
    def __init__(self):
        self.BASE_URL = LIBGEN
        self.LIMIT = None

    @decorator_asyncio_fix
    async def _individual_scrap(self, session, url, obj, sem):
        async with sem:
            try:
                async with session.get(url, headers=HEADER_AIO) as res:
                    html = await res.text(encoding="ISO-8859-1")
                    soup = BeautifulSoup(html, "html.parser")
                    try:
                        x = soup.find_all("a")
                        for a in x:
                            if a.text == "One-filetorrent":
                                if a["href"] != "#":
                                    obj["torrent"] = self.BASE_URL + a["href"]
                        poster = soup.find_all("img")[0]

                        if poster:
                            obj["poster"] = "http://library.lol" + poster["src"]
                    except:
                        ...
            except:
                return None

    async def _get_torrent(self, result, session, urls):
        tasks = []
        sem = asyncio.Semaphore(3)
        for idx, url in enumerate(urls):
            for obj in result["data"]:
                if obj["url"] == url:
                    task = asyncio.create_task(
                        self._individual_scrap(session, url, result["data"][idx], sem)
                    )
                    tasks.append(task)
        await asyncio.gather(*tasks)
        return result

    def _parser(self, htmls):
        try:
            for html in htmls:
                soup = BeautifulSoup(html, "html.parser")
                list_of_urls = []
                my_dict = {"data": []}
                trs = soup.select("[valign=top]")
                for tr in trs[1:]:
                    td = tr.find_all("td")
                    id = td[0].text
                    authors = []
                    author = td[1].find_all("a")
                    for a in author:
                        authors.append(a.text.strip())
                    name_and_url = td[2].find("a")
                    name = name_and_url.text
                    url = self.BASE_URL + "/" + name_and_url["href"]
                    list_of_urls.append(url)
                    publisher = td[3].text
                    year = td[4].text
                    pages = None
                    try:
                        pages = td[5].text
                    except:
                        ...
                    language = td[6].text
                    size = td[7].text
                    extension = td[8].text

                    my_dict["data"].append(
                        {
                            "id": id,
                            "authors": authors,
                            "name": name,
                            "publisher": publisher,
                            "year": year,
                            "pages": pages,
                            "language": language,
                            "size": size,
                            "extension": extension,
                            "url": url,
                        }
                    )
                    if len(my_dict["data"]) == self.LIMIT:
                        break
                return my_dict, list_of_urls
        except:
            return None, None

    async def search(self, query, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = (
                self.BASE_URL
                + "/search.php?req={}&lg_topic=libgen&open=0&view=simple&res=100&phrase=1&column=def".format(
                    query
                )
            )
            return await self.parser_result(start_time, url, session)

    async def parser_result(self, start_time, url, session):
        htmls = await Scraper().get_all_results(session, url)
        result, urls = self._parser(htmls)
        if result is not None:
            results = await self._get_torrent(result, session, urls)
            results["time"] = time.time() - start_time
            results["total"] = len(results["data"])
            return results
        return result
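Unlike most of the other scrapers here, `Libgen` caps its per-result detail requests with `asyncio.Semaphore(3)`, so at most three `_individual_scrap` calls hit the site at once. A standalone sketch of that pattern, with illustrative names only (the sleep stands in for the real HTTP request):

```python
import asyncio


async def fetch(url, sem):
    # The semaphore bounds how many coroutines execute this block concurrently.
    async with sem:
        await asyncio.sleep(0.1)  # placeholder for the real request
        return url


async def main():
    sem = asyncio.Semaphore(3)  # same bound Libgen uses
    urls = [f"https://example.org/page/{i}" for i in range(10)]
    results = await asyncio.gather(*(fetch(u, sem) for u in urls))
    print(len(results))


asyncio.run(main())
```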
torrents/limetorrents.py ADDED
@@ -0,0 +1,133 @@
import asyncio
import re
import time
import aiohttp
from bs4 import BeautifulSoup
from helper.asyncioPoliciesFix import decorator_asyncio_fix
from helper.html_scraper import Scraper
from constants.base_url import LIMETORRENT
from constants.headers import HEADER_AIO


class Limetorrent:
    def __init__(self):
        self.BASE_URL = LIMETORRENT
        self.LIMIT = None

    @decorator_asyncio_fix
    async def _individual_scrap(self, session, url, obj):
        try:
            async with session.get(url, headers=HEADER_AIO) as res:
                html = await res.text(encoding="ISO-8859-1")
                soup = BeautifulSoup(html, "html.parser")
                try:
                    a_tag = soup.find_all("a", class_="csprite_dltorrent")
                    obj["torrent"] = a_tag[0]["href"]
                    obj["magnet"] = a_tag[-1]["href"]
                    obj["hash"] = re.search(
                        r"([{a-f\d,A-F\d}]{32,40})\b", obj["magnet"]
                    ).group(0)
                except:
                    ...
        except:
            return None

    async def _get_torrent(self, result, session, urls):
        tasks = []
        for idx, url in enumerate(urls):
            for obj in result["data"]:
                if obj["url"] == url:
                    task = asyncio.create_task(
                        self._individual_scrap(session, url, result["data"][idx])
                    )
                    tasks.append(task)
        await asyncio.gather(*tasks)
        return result

    def _parser(self, htmls, idx=0):
        try:
            for html in htmls:
                soup = BeautifulSoup(html, "html.parser")
                list_of_urls = []
                my_dict = {"data": []}

                for tr in soup.find_all("tr")[idx:]:
                    td = tr.find_all("td")
                    if len(td) == 0:
                        continue
                    name = td[0].get_text(strip=True)
                    url = self.BASE_URL + td[0].find_all("a")[-1]["href"]
                    list_of_urls.append(url)
                    added_on_and_category = td[1].get_text(strip=True)
                    date = (added_on_and_category.split("-")[0]).strip()
                    category = (added_on_and_category.split("in")[-1]).strip()
                    size = td[2].text
                    seeders = td[3].text
                    leechers = td[4].text
                    my_dict["data"].append(
                        {
                            "name": name,
                            "size": size,
                            "date": date,
                            "category": category if category != date else None,
                            "seeders": seeders,
                            "leechers": leechers,
                            "url": url,
                        }
                    )
                    if len(my_dict["data"]) == self.LIMIT:
                        break
                try:
                    div = soup.find("div", class_="search_stat")
                    current_page = int(div.find("span", class_="active").text)
                    total_page = int((div.find_all("a"))[-2].text)
                    if current_page > total_page:
                        total_page = current_page
                    my_dict["current_page"] = current_page
                    my_dict["total_pages"] = total_page
                except:
                    ...
                return my_dict, list_of_urls
        except:
            return None, None

    async def search(self, query, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = self.BASE_URL + "/search/all/{}//{}".format(query, page)
            return await self.parser_result(start_time, url, session, idx=5)

    async def parser_result(self, start_time, url, session, idx=0):
        htmls = await Scraper().get_all_results(session, url)
        result, urls = self._parser(htmls, idx)
        if result is not None:
            results = await self._get_torrent(result, session, urls)
            results["time"] = time.time() - start_time
            results["total"] = len(results["data"])
            return results
        return result

    async def trending(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = self.BASE_URL + "/top100"
            return await self.parser_result(start_time, url, session)

    async def recent(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            if not category:
                url = self.BASE_URL + "/latest100"
            else:
                category = (category).capitalize()
                if category == "Apps":
                    category = "Applications"
                elif category == "Tv":
                    category = "TV-shows"
                url = self.BASE_URL + "/browse-torrents/{}/date/{}/".format(
                    category, page
                )
            return await self.parser_result(start_time, url, session)
torrents/magnet_dl.py ADDED
@@ -0,0 +1,124 @@
import asyncio
import re
import time
import aiohttp
import cloudscraper
import requests
from bs4 import BeautifulSoup
from constants.base_url import MAGNETDL


class Magnetdl:
    def __init__(self):
        self.BASE_URL = MAGNETDL
        self.LIMIT = None

    def _parser(self, htmls):
        try:
            for html in htmls:
                soup = BeautifulSoup(html, "html.parser")

                my_dict = {"data": []}
                table = soup.find("table", class_="download")
                for tr in soup.find_all("tr"):
                    td = tr.find_all("td")
                    if len(td) > 1:
                        name = td[1].find("a").get_text(strip=True)
                        if name != "":
                            magnet = td[0].find("a")["href"]
                            try:
                                size = td[5].get_text(strip=True)
                            except IndexError:
                                size = None
                            url = td[1].find("a")["href"]
                            date = td[2].get_text(strip=True)
                            seeders = td[6].get_text(strip=True)
                            leechers = td[7].get_text(strip=True)
                            category = td[3].text
                            my_dict["data"].append(
                                {
                                    "name": name,
                                    "size": size,
                                    "seeders": seeders,
                                    "leechers": leechers,
                                    "category": category,
                                    "hash": re.search(
                                        r"([{a-f\d,A-F\d}]{32,40})\b", magnet
                                    ).group(0),
                                    "magnet": magnet,
                                    "url": self.BASE_URL + url,
                                    "date": date,
                                }
                            )
                    if len(my_dict["data"]) == self.LIMIT:
                        break
                total_results = soup.find("div", id="footer").text.replace(",", "")
                current_page = int(
                    (re.search(r"Page\s\d*", total_results).group(0)).replace(
                        "Page ", ""
                    )
                )
                total_pages = (
                    int(
                        (
                            (re.search(r"Found\s\d*", total_results).group(0)).replace(
                                "Found ", ""
                            )
                        )
                    )
                    // 40
                )
                my_dict["current_page"] = current_page
                my_dict["total_pages"] = (
                    30
                    if total_pages > 30
                    else total_pages
                    if total_pages != 0
                    else total_pages + 1
                )
                return my_dict
        except:
            return None

    async def _get_html(self, session, url):
        session = cloudscraper.create_scraper(sess=session)
        try:
            return session.get(url).text
        except:
            return None

    async def _get_all_results(self, session, url):
        return await asyncio.gather(asyncio.create_task(self._get_html(session, url)))

    async def search(self, query, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            query = requests.utils.unquote(query)
            query = query.split(" ")
            query = "-".join(query)
            url = self.BASE_URL + "/{}/{}/se/desc/{}/".format(query[0], query, page)
            return await self.parser_result(start_time, url, session)

    async def parser_result(self, start_time, url, session):
        data = await self._get_all_results(session, url)
        results = self._parser(data)
        if results is not None:
            results["time"] = time.time() - start_time
            results["total"] = len(results["data"])
            return results
        return results

    async def recent(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            if not category:
                url = self.BASE_URL + "/download/movies/{}".format(page)
            else:
                if category == "books":
                    category = "e-books"
                url = self.BASE_URL + "/download/{}/{}/".format(category, page)
            return await self.parser_result(start_time, url, session)

    #! maximum page in category is 30
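`Magnetdl` is the one scraper above that never awaits an aiohttp request: `_get_html` calls the synchronous `cloudscraper` client inside an async method, so the HTTP call blocks the event loop while it runs. A hedged sketch of one way to keep such a blocking call off the loop, shown only as an alternative and not what the file above does:

```python
import asyncio

import cloudscraper


async def get_html(url):
    loop = asyncio.get_running_loop()
    scraper = cloudscraper.create_scraper()
    # Run the blocking .get() in the default thread pool so the event loop stays responsive.
    return await loop.run_in_executor(None, lambda: scraper.get(url).text)
```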
torrents/nyaa_si.py ADDED
@@ -0,0 +1,87 @@
import re
import time
import aiohttp
from bs4 import BeautifulSoup
from helper.html_scraper import Scraper
from constants.base_url import NYAASI


class NyaaSi:
    def __init__(self):
        self.BASE_URL = NYAASI
        self.LIMIT = None

    def _parser(self, htmls):
        try:
            for html in htmls:
                soup = BeautifulSoup(html, "html.parser")

                my_dict = {"data": []}
                for tr in (soup.find("table")).find_all("tr")[1:]:
                    td = tr.find_all("td")
                    name = td[1].find_all("a")[-1].text
                    url = td[1].find_all("a")[-1]["href"]
                    magnet_and_torrent = td[2].find_all("a")
                    magnet = magnet_and_torrent[-1]["href"]
                    torrent = self.BASE_URL + magnet_and_torrent[0]["href"]
                    size = td[3].text
                    date = td[4].text
                    seeders = td[5].text
                    leechers = td[6].text
                    downloads = td[7].text
                    category = td[0].find("a")["title"].split("-")[0].strip()
                    my_dict["data"].append(
                        {
                            "name": name,
                            "size": size,
                            "seeders": seeders,
                            "leechers": leechers,
                            "category": category,
                            "hash": re.search(
                                r"([{a-f\d,A-F\d}]{32,40})\b", magnet
                            ).group(0),
                            "magnet": magnet,
                            "torrent": torrent,
                            "url": self.BASE_URL + url,
                            "date": date,
                            "downloads": downloads,
                        }
                    )
                    if len(my_dict["data"]) == self.LIMIT:
                        break

                try:
                    ul = soup.find("ul", class_="pagination")
                    tpages = ul.find_all("a")[-2].text
                    current_page = (ul.find("li", class_="active")).find("a").text
                    my_dict["current_page"] = int(current_page)
                    my_dict["total_pages"] = int(tpages)
                except:
                    my_dict["current_page"] = None
                    my_dict["total_pages"] = None
                return my_dict
        except:
            return None

    async def search(self, query, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = self.BASE_URL + "/?f=0&c=0_0&q={}&p={}".format(query, page)
            return await self.parser_result(start_time, url, session)

    async def parser_result(self, start_time, url, session):
        html = await Scraper().get_all_results(session, url)
        results = self._parser(html)
        if results is not None:
            results["time"] = time.time() - start_time
            results["total"] = len(results["data"])
            return results
        return results

    async def recent(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = self.BASE_URL
            return await self.parser_result(start_time, url, session)
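Most of the parsers above derive the `hash` field by running the same regular expression over the magnet link and taking the first 32-40 character hex run as the infohash. A quick standalone check of that expression (the magnet URI below is a made-up example):

```python
import re

# Same pattern the scrapers use to pull the infohash out of a magnet link.
HASH_RE = r"([{a-f\d,A-F\d}]{32,40})\b"

magnet = (
    "magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567"
    "&dn=example"
)
match = re.search(HASH_RE, magnet)
print(match.group(0))  # 0123456789abcdef0123456789abcdef01234567
```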
torrents/pirate_bay.py ADDED
@@ -0,0 +1,97 @@
import re
import time
import aiohttp
from bs4 import BeautifulSoup
from helper.html_scraper import Scraper
from constants.base_url import PIRATEBAY


class PirateBay:
    def __init__(self):
        self.BASE_URL = PIRATEBAY
        self.LIMIT = None

    def _parser(self, htmls):
        try:
            for html in htmls:
                soup = BeautifulSoup(html, "html.parser")

                my_dict = {"data": []}
                for tr in soup.find_all("tr")[1:]:
                    td = tr.find_all("td")
                    try:
                        name = td[1].find("a").text
                    except:
                        name = None
                    if name:
                        url = td[1].find("a")["href"]
                        magnet = td[3].find_all("a")[0]["href"]
                        size = td[4].text.strip()
                        seeders = td[5].text
                        leechers = td[6].text
                        category = td[0].find_all("a")[0].text
                        uploader = td[7].text
                        dateUploaded = td[2].text

                        my_dict["data"].append(
                            {
                                "name": name,
                                "size": size,
                                "seeders": seeders,
                                "leechers": leechers,
                                "category": category,
                                "uploader": uploader,
                                "url": url,
                                "date": dateUploaded,
                                "hash": re.search(
                                    r"([{a-f\d,A-F\d}]{32,40})\b", magnet
                                ).group(0),
                                "magnet": magnet,
                            }
                        )
                    if len(my_dict["data"]) == self.LIMIT:
                        break
                last_tr = soup.find_all("tr")[-1]
                check_if_pagination_available = last_tr.find("td").find("center")
                if not check_if_pagination_available:
                    current_page = last_tr.find("td").find("b").text
                    my_dict["current_page"] = int(current_page)
                    my_dict["total_pages"] = int(
                        last_tr.find("td").find_all("a")[-2].text
                    )
                return my_dict
        except:
            return None

    async def search(self, query, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = self.BASE_URL + "/search/{}/{}/99/0".format(query, page)
            return await self.parser_result(start_time, url, session)

    async def parser_result(self, start_time, url, session):
        html = await Scraper().get_all_results(session, url)
        results = self._parser(html)
        if results is not None:
            results["time"] = time.time() - start_time
            results["total"] = len(results["data"])
            return results
        return results

    async def trending(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = self.BASE_URL + "/top/all"
            return await self.parser_result(start_time, url, session)

    async def recent(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            if not category:
                url = self.BASE_URL + "/recent"
            else:
                url = self.BASE_URL + "/{}/latest/".format(category)
            return await self.parser_result(start_time, url, session)
torrents/torlock.py ADDED
@@ -0,0 +1,156 @@
import asyncio
import re
import time
import aiohttp
from bs4 import BeautifulSoup
from helper.asyncioPoliciesFix import decorator_asyncio_fix
from helper.html_scraper import Scraper
from constants.base_url import TORLOCK
from constants.headers import HEADER_AIO


class Torlock:
    def __init__(self):
        self.BASE_URL = TORLOCK
        self.LIMIT = None

    @decorator_asyncio_fix
    async def _individual_scrap(self, session, url, obj):
        try:
            async with session.get(url, headers=HEADER_AIO) as res:
                html = await res.text(encoding="ISO-8859-1")
                soup = BeautifulSoup(html, "html.parser")
                try:
                    tm = soup.find_all("a")
                    magnet = tm[20]["href"]
                    torrent = tm[23]["href"]
                    try:
                        obj["poster"] = soup.find_all("img", class_="img-responsive")[
                            0
                        ]["src"]
                    except:
                        ...
                    if str(magnet).startswith("magnet") and str(torrent).endswith(
                        "torrent"
                    ):
                        obj["torrent"] = torrent
                        obj["magnet"] = magnet
                        obj["hash"] = re.search(
                            r"([{a-f\d,A-F\d}]{32,40})\b", magnet
                        ).group(0)
                        obj["category"] = tm[25].text
                        imgs = soup.select(".tab-content img.img-fluid")
                        if imgs and len(imgs) > 0:
                            obj["screenshot"] = [img["src"] for img in imgs]
                    else:
                        del obj
                except IndexError:
                    ...
        except:
            return None

    async def _get_torrent(self, result, session, urls):
        tasks = []
        for idx, url in enumerate(urls):
            for obj in result["data"]:
                if obj["url"] == url:
                    task = asyncio.create_task(
                        self._individual_scrap(session, url, result["data"][idx])
                    )
                    tasks.append(task)
        await asyncio.gather(*tasks)
        return result

    def _parser(self, htmls, idx=0):
        try:
            for html in htmls:
                soup = BeautifulSoup(html, "html.parser")
                list_of_urls = []
                my_dict = {"data": []}

                for tr in soup.find_all("tr")[idx:]:
                    td = tr.find_all("td")
                    if len(td) == 0:
                        continue
                    name = td[0].get_text(strip=True)
                    if name != "":
                        url = td[0].find("a")["href"]
                        if url == "":
                            break
                        url = self.BASE_URL + url
                        list_of_urls.append(url)
                        size = td[2].get_text(strip=True)
                        date = td[1].get_text(strip=True)
                        seeders = td[3].get_text(strip=True)
                        leechers = td[4].get_text(strip=True)
                        my_dict["data"].append(
                            {
                                "name": name,
                                "size": size,
                                "date": date,
                                "seeders": seeders,
                                "leechers": leechers,
                                "url": url,
                            }
                        )
                    if len(my_dict["data"]) == self.LIMIT:
                        break
                try:
                    ul = soup.find("ul", class_="pagination")
                    tpages = ul.find_all("a")[-2].text
                    current_page = (
                        (ul.find("li", class_="active")).find("span").text.split(" ")[0]
                    )
                    my_dict["current_page"] = int(current_page)
                    my_dict["total_pages"] = int(tpages)
                except:
                    my_dict["current_page"] = None
                    my_dict["total_pages"] = None
                return my_dict, list_of_urls
        except:
            return None, None

    async def search(self, query, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = self.BASE_URL + "/all/torrents/{}.html?sort=seeds&page={}".format(
                query, page
            )
            return await self.parser_result(start_time, url, session, idx=5)

    async def parser_result(self, start_time, url, session, idx=0):
        htmls = await Scraper().get_all_results(session, url)
        result, urls = self._parser(htmls, idx)
        if result is not None:
            results = await self._get_torrent(result, session, urls)
            results["time"] = time.time() - start_time
            results["total"] = len(results["data"])
            return results
        return result

    async def trending(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            if not category:
                url = self.BASE_URL
            else:
                if category == "books":
                    category = "ebooks"
                url = self.BASE_URL + "/{}.html".format(category)
            return await self.parser_result(start_time, url, session)

    async def recent(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            if not category:
                url = self.BASE_URL + "/fresh.html"
            else:
                if category == "books":
                    category = "ebooks"
                url = self.BASE_URL + "/{}/{}/added/desc.html".format(category, page)
            return await self.parser_result(start_time, url, session)

    #! Maybe implement Search By Category in future
torrents/torrentProject.py ADDED
@@ -0,0 +1,99 @@
import asyncio
import time
import aiohttp
import requests
from bs4 import BeautifulSoup
from helper.asyncioPoliciesFix import decorator_asyncio_fix
from helper.html_scraper import Scraper
from constants.base_url import TORRENTPROJECT
from constants.headers import HEADER_AIO


class TorrentProject:
    def __init__(self):
        self.BASE_URL = TORRENTPROJECT
        self.LIMIT = None

    @decorator_asyncio_fix
    async def _individual_scrap(self, session, url, obj, sem):
        async with sem:
            try:
                async with session.get(
                    url,
                    headers=HEADER_AIO,
                ) as res:
                    html = await res.text(encoding="ISO-8859-1")
                    soup = BeautifulSoup(html, "html.parser")
                    try:
                        magnet = soup.select_one(
                            "#download > div:nth-child(2) > div > a"
                        )["href"]
                        index_of_magnet = magnet.index("magnet")
                        magnet = requests.utils.unquote(magnet[index_of_magnet:])
                        obj["magnet"] = magnet
                    except:
                        ...
            except:
                return None

    async def _get_torrent(self, result, session, urls):
        tasks = []
        sem = asyncio.Semaphore(3)
        for idx, url in enumerate(urls):
            for obj in result["data"]:
                if obj["url"] == url:
                    task = asyncio.create_task(
                        self._individual_scrap(session, url, result["data"][idx], sem)
                    )
                    tasks.append(task)
        await asyncio.gather(*tasks)
        return result

    def _parser(self, htmls):
        try:
            for html in htmls:
                soup = BeautifulSoup(html, "html.parser")
                list_of_urls = []
                my_dict = {"data": []}
                for div in soup.select("div#similarfiles div")[2:]:
                    span = div.find_all("span")
                    name = span[0].find("a").text
                    url = self.BASE_URL + span[0].find("a")["href"]
                    list_of_urls.append(url)
                    seeders = span[2].text
                    leechers = span[3].text
                    date = span[4].text
                    size = span[5].text

                    my_dict["data"].append(
                        {
                            "name": name,
                            "size": size,
                            "date": date,
                            "seeders": seeders,
                            "leechers": leechers,
                            "url": url,
                        }
                    )
                    if len(my_dict["data"]) == self.LIMIT:
                        break
                return my_dict, list_of_urls
        except:
            return None, None

    async def search(self, query, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = self.BASE_URL + "/?t={}&p={}".format(query, page - 1)
            return await self.parser_result(start_time, url, session)

    async def parser_result(self, start_time, url, session):
        htmls = await Scraper().get_all_results(session, url)
        result, urls = self._parser(htmls)
        if result is not None:
            results = await self._get_torrent(result, session, urls)
            results["time"] = time.time() - start_time
            results["total"] = len(results["data"])
            return results
        return result
torrents/torrent_galaxy.py ADDED
@@ -0,0 +1,227 @@
import re
import time
import aiohttp
from bs4 import BeautifulSoup
from helper.html_scraper import Scraper
from constants.base_url import TGX


class TorrentGalaxy:
    def __init__(self):
        self.BASE_URL = TGX
        self.LIMIT = None

    def _parser_individual(self, html):
        try:
            soup = BeautifulSoup(html[0], "html.parser")
            my_dict = {"data": []}
            root_div = soup.find("div", class_="gluewrapper")
            post_nd_torrents = root_div.find_next("div").find_all("div")
            poster = post_nd_torrents[1].find("img")["data-src"]
            torrentsand_all = post_nd_torrents[4].find_all("a")
            torrent_link = torrentsand_all[0]["href"]
            magnet_link = torrentsand_all[1]["href"]
            direct_link = self.BASE_URL + torrentsand_all[2]["href"]

            details_root = soup.find("div", class_="gluewrapper").select(
                "div > :nth-child(2) > div > .tprow"
            )

            name = details_root[0].find_all("div")[-1].get_text(strip=True)
            category = (
                details_root[3].find_all("div")[-1].get_text(strip=True).split(">")[0]
            )
            languagee = details_root[4].find_all("div")[-1].get_text(strip=True)
            size = details_root[5].find_all("div")[-1].get_text(strip=True)
            hash = details_root[6].find_all("div")[-1].get_text(strip=True)
            username = (
                details_root[7]
                .find_all("div")[-1]
                .find("span", class_="username")
                .get_text(strip=True)
            )
            date_up = details_root[8].find_all("div")[-1].get_text(strip=True)

            btns = details_root[10].find_all("button")
            seeders = btns[0].find("span").get_text(strip=True)
            leechers = btns[1].find("span").get_text(strip=True)
            downloads = btns[2].find("span").get_text(strip=True)
            imdb_id = soup.select_one("#imdbpage")["href"].split("/")[-1]
            genre_list = [
                x.get_text(strip=True) for x in details_root[11].find_all("a")
            ]
            soup.find("div", id="intblockslide").find_all("a")
            imgs = [
                img["href"]
                for img in (soup.find("div", id="intblockslide").find_all("a"))
                if img["href"].endswith((".png", ".jpg", ".jpeg"))
            ]
            my_dict["data"].append(
                {
                    "name": name,
                    "size": size,
                    "seeders": seeders,
                    "language": languagee,
                    "leechers": leechers,
                    "category": category,
                    "uploader": username,
                    "downloads": downloads,
                    "poster": poster,
                    "direct_download_link": direct_link,
                    "imdb_id": imdb_id,
                    "hash": hash,
                    "magnet": magnet_link,
                    "torrent": torrent_link,
                    "screenshot": imgs,
                    "genre": genre_list,
                    "date": date_up,
                }
            )
            return my_dict
        except:
            return None

    def _parser(self, htmls):
        try:
            for html in htmls:
                soup = BeautifulSoup(html, "html.parser")

                my_dict = {"data": []}
                for idx, divs in enumerate(soup.find_all("div", class_="tgxtablerow")):
                    div = divs.find_all("div")
                    try:
                        name = div[4].find("a").get_text(strip=True)
                        imdb_url = (div[4].find_all("a"))[-1]["href"]
                    except:
                        name = (div[1].find("a", class_="txlight")).find("b").text
                        imdb_url = (div[1].find_all("a"))[-1]["href"]

                    if name != "":
                        try:
                            magnet = div[5].find_all("a")[1]["href"]
                            torrent = div[5].find_all("a")[0]["href"]
                        except:
                            magnet = div[3].find_all("a")[1]["href"]
                            torrent = div[3].find_all("a")[0]["href"]
                        size = soup.select("span.badge.badge-secondary.txlight")[
                            idx
                        ].text
                        try:
                            url = div[4].find("a")["href"]
                        except:
                            url = div[1].find("a", class_="txlight")["href"]
                        try:
                            date = div[12].get_text(strip=True)
                        except:
                            date = div[10].get_text(strip=True)
                        try:
                            seeders_leechers = div[11].find_all("b")
                            seeders = seeders_leechers[0].text
                            leechers = seeders_leechers[1].text
                        except:
                            seeders_leechers = div[11].find_all("b")
                            seeders = seeders_leechers[0].text
                            leechers = seeders_leechers[1].text
                        try:
                            uploader = (div[7].find("a")).find("span").text
                        except:
                            uploader = (div[5].find("a")).find("span").text
                        try:
                            category = (
                                div[0].find("small").text.replace("&nbsp", "")
                            ).split(":")[0]
                        except:
                            category = None
                        my_dict["data"].append(
                            {
                                "name": name,
                                "size": size,
                                "seeders": seeders,
                                "leechers": leechers,
                                "category": category,
                                "uploader": uploader,
                                "imdb_id": imdb_url.split("=")[-1],
                                "hash": re.search(
                                    r"([{a-f\d,A-F\d}]{32,40})\b", magnet
                                ).group(0),
                                "magnet": magnet,
                                "torrent": torrent,
                                "url": self.BASE_URL + url,
                                "date": date,
                            }
                        )
                    if len(my_dict["data"]) == self.LIMIT:
                        break
                try:
                    ul = soup.find_all("ul", class_="pagination")[-1]
                    tpages = ul.find_all("li")[-2]
                    my_dict["current_page"] = int(
                        soup.select_one("li.page-item.active.txlight a").text.split(
                            " "
                        )[0]
                    )
                    my_dict["total_pages"] = int(tpages.find("a").text)
                except:
                    my_dict["current_page"] = None
                    my_dict["total_pages"] = None
                return my_dict
        except:
            return None

    async def search(self, query, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = (
                self.BASE_URL
                + "/torrents.php?search=+{}&sort=seeders&order=desc&page={}".format(
                    query, page - 1
                )
            )
            return await self.parser_result(start_time, url, session)

    async def get_torrent_by_url(self, torrent_url):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            return await self.parser_result(
                start_time, torrent_url, session, is_individual=True
            )

    async def parser_result(self, start_time, url, session, is_individual=False):
        html = await Scraper().get_all_results(session, url)
        if is_individual:
            results = self._parser_individual(html)
        else:
            results = self._parser(html)
        if results is not None:
            results["time"] = time.time() - start_time
            results["total"] = len(results["data"])
            return results
        return results

    async def trending(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = self.BASE_URL
            return await self.parser_result(start_time, url, session)

    async def recent(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            if not category:
                url = self.BASE_URL + "/latest"
            else:
                if category == "documentaries":
                    category = "Docus"
                url = (
                    self.BASE_URL
                    + "/torrents.php?parent_cat={}&sort=id&order=desc&page={}".format(
                        str(category).capitalize(), page - 1
                    )
                )
            return await self.parser_result(start_time, url, session)

    #! Maybe Implemented in Future
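`TorrentGalaxy` is the only class in this upload with a `get_torrent_by_url` helper, which routes a single torrent page through `_parser_individual` instead of the listing parser. A hedged usage sketch (the URL is a placeholder; note that `search` takes a 1-based `page` and sends `page - 1` to the site):

```python
import asyncio

from torrents.torrent_galaxy import TorrentGalaxy


async def main():
    tgx = TorrentGalaxy()
    # Listing search across the results table.
    listing = await tgx.search("debian", page=1, limit=3)
    # Detail scrape of one torrent page (placeholder URL).
    detail = await tgx.get_torrent_by_url("https://torrentgalaxy.example/torrent/123")
    print(listing and listing["total"], detail and detail["total"])


asyncio.run(main())
```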
torrents/torrentfunk.py ADDED
@@ -0,0 +1,123 @@
import asyncio
import time
import aiohttp
from bs4 import BeautifulSoup
from helper.asyncioPoliciesFix import decorator_asyncio_fix
from helper.html_scraper import Scraper
from constants.base_url import TORRENTFUNK
from constants.headers import HEADER_AIO


class TorrentFunk:
    def __init__(self):
        self.BASE_URL = TORRENTFUNK
        self.LIMIT = None

    @decorator_asyncio_fix
    async def _individual_scrap(self, session, url, obj):
        try:
            async with session.get(url, headers=HEADER_AIO) as res:
                html = await res.text(encoding="ISO-8859-1")
                soup = BeautifulSoup(html, "html.parser")
                try:
                    obj["torrent"] = soup.select_one(
                        "#right > main > div.content > table:nth-child(3) > tr > td:nth-child(2) > a"
                    )["href"]
                    obj["category"] = soup.select_one(
                        "#right > main > div.content > table:nth-child(7) > tr> td:nth-child(2) > a"
                    ).text
                    obj["hash"] = soup.select_one(
                        "#right > main > div.content > table:nth-child(7) > tr:nth-child(3) > td:nth-child(2)"
                    ).text
                except:
                    ...
        except:
            return None

    async def _get_torrent(self, result, session, urls):
        tasks = []
        for idx, url in enumerate(urls):
            for obj in result["data"]:
                if obj["url"] == url:
                    task = asyncio.create_task(
                        self._individual_scrap(session, url, result["data"][idx])
                    )
                    tasks.append(task)
        await asyncio.gather(*tasks)
        return result

    def _parser(self, htmls, idx=1):
        try:
            for html in htmls:
                soup = BeautifulSoup(html, "html.parser")
                list_of_urls = []
                my_dict = {"data": []}

                for tr in soup.select(".tmain tr")[idx:]:
                    td = tr.find_all("td")
                    if len(td) == 0:
                        continue
                    name = td[0].find("a").text
                    date = td[1].text
                    size = td[2].text
                    seeders = td[3].text
                    leechers = td[4].text
                    uploader = td[5].text
                    url = self.BASE_URL + td[0].find("a")["href"]
                    list_of_urls.append(url)
                    my_dict["data"].append(
                        {
                            "name": name,
                            "size": size,
                            "date": date,
                            "seeders": seeders,
                            "leechers": leechers,
                            "uploader": uploader if uploader else None,
                            "url": url,
                        }
                    )
                    if len(my_dict["data"]) == self.LIMIT:
                        break
                return my_dict, list_of_urls
        except:
            return None, None

    async def search(self, query, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = self.BASE_URL + "/all/torrents/{}/{}.html".format(query, page)
            return await self.parser_result(start_time, url, session, idx=6)

    async def parser_result(self, start_time, url, session, idx=1):
        htmls = await Scraper().get_all_results(session, url)
        result, urls = self._parser(htmls, idx)
        if result:
            results = await self._get_torrent(result, session, urls)
            results["time"] = time.time() - start_time
            results["total"] = len(results["data"])
            return results
        return result

    async def trending(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = self.BASE_URL
            return await self.parser_result(start_time, url, session)

    async def recent(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            if not category:
                url = self.BASE_URL + "/movies/recent.html"
            else:
                if category == "apps":
                    category = "software"
                elif category == "tv":
                    category = "television"
                elif category == "books":
                    category = "ebooks"
                url = self.BASE_URL + "/{}/recent.html".format(category)
            return await self.parser_result(start_time, url, session)
torrents/x1337.py ADDED
@@ -0,0 +1,191 @@
import asyncio
import re
import time
import aiohttp
from bs4 import BeautifulSoup
from helper.asyncioPoliciesFix import decorator_asyncio_fix
from helper.html_scraper import Scraper
from constants.base_url import X1337
from constants.headers import HEADER_AIO


class x1337:
    def __init__(self):
        self.BASE_URL = X1337
        self.LIMIT = None

    @decorator_asyncio_fix
    async def _individual_scrap(self, session, url, obj):
        try:
            async with session.get(url, headers=HEADER_AIO) as res:
                html = await res.text(encoding="ISO-8859-1")
                soup = BeautifulSoup(html, "html.parser")
                try:
                    magnet = soup.select_one(".no-top-radius > div > ul > li > a")[
                        "href"
                    ]
                    uls = soup.find_all("ul", class_="list")[1]
                    lis = uls.find_all("li")[0]
                    imgs = [
                        img["data-original"]
                        for img in (soup.find("div", id="description")).find_all("img")
                        if img["data-original"].endswith((".png", ".jpg", ".jpeg"))
                    ]
                    files = [
                        f.text for f in soup.find("div", id="files").find_all("li")
                    ]
                    if len(imgs) > 0:
                        obj["screenshot"] = imgs
                    obj["category"] = lis.find("span").text
                    obj["files"] = files
                    try:
                        poster = soup.select_one("div.torrent-image img")["src"]
                        if str(poster).startswith("//"):
                            obj["poster"] = "https:" + poster
                        elif str(poster).startswith("/"):
                            obj["poster"] = self.BASE_URL + poster
                    except:
                        ...
                    obj["magnet"] = magnet

                    obj["hash"] = re.search(
                        r"([{a-f\d,A-F\d}]{32,40})\b", magnet
                    ).group(0)
                except IndexError:
                    ...
        except:
            return None

    async def _get_torrent(self, result, session, urls):
        tasks = []
        for idx, url in enumerate(urls):
            for obj in result["data"]:
                if obj["url"] == url:
                    task = asyncio.create_task(
                        self._individual_scrap(session, url, result["data"][idx])
                    )
                    tasks.append(task)
        await asyncio.gather(*tasks)
        return result

    def _parser(self, htmls):
        try:
            for html in htmls:
                soup = BeautifulSoup(html, "html.parser")
                list_of_urls = []
                my_dict = {"data": []}
                trs = soup.select("tbody tr")
                for tr in trs:
                    td = tr.find_all("td")
                    name = td[0].find_all("a")[-1].text
                    if name:
                        url = self.BASE_URL + td[0].find_all("a")[-1]["href"]
                        list_of_urls.append(url)
                        seeders = td[1].text
                        leechers = td[2].text
                        date = td[3].text
                        size = td[4].text.replace(seeders, "")
                        uploader = td[5].find("a").text

                        my_dict["data"].append(
                            {
                                "name": name,
                                "size": size,
                                "date": date,
                                "seeders": seeders,
                                "leechers": leechers,
                                "url": url,
                                "uploader": uploader,
                            }
                        )
                    if len(my_dict["data"]) == self.LIMIT:
                        break
                try:
                    pages = soup.select(".pagination li a")
                    my_dict["current_page"] = int(pages[0].text)
                    tpages = pages[-1].text
                    if tpages == ">>":
                        my_dict["total_pages"] = int(pages[-2].text)
                    else:
                        my_dict["total_pages"] = int(pages[-1].text)
                except:
                    ...
                return my_dict, list_of_urls
        except:
            return None, None

    async def search(self, query, page, limit):
        async with aiohttp.ClientSession() as session:
            self.LIMIT = limit
            start_time = time.time()
            url = self.BASE_URL + "/search/{}/{}/".format(query, page)
            return await self.parser_result(
                start_time, url, session, query=query, page=page
            )

    async def parser_result(self, start_time, url, session, page, query=None):
        htmls = await Scraper().get_all_results(session, url)
        result, urls = self._parser(htmls)
        if result is not None:
            results = await self._get_torrent(result, session, urls)
            results["time"] = time.time() - start_time
            results["total"] = len(results["data"])
            if query is None:
                return results
            while True:
                if len(results["data"]) >= self.LIMIT:
                    results["data"] = results["data"][0 : self.LIMIT]
                    results["total"] = len(results["data"])
                    return results
                page = page + 1
                url = self.BASE_URL + "/search/{}/{}/".format(query, page)
                htmls = await Scraper().get_all_results(session, url)
                result, urls = self._parser(htmls)
                if result is not None:
                    if len(result["data"]) > 0:
                        res = await self._get_torrent(result, session, urls)
                        for obj in res["data"]:
                            results["data"].append(obj)
                        try:
                            results["current_page"] = res["current_page"]
                        except:
                            ...
                        results["time"] = time.time() - start_time
                        results["total"] = len(results["data"])
                    else:
                        break
                else:
                    break
            return results
        return result

    async def trending(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            if not category:
                url = self.BASE_URL + "/home/"
            else:
                url = self.BASE_URL + "/popular-{}".format(category.lower())
            return await self.parser_result(start_time, url, session, page)

    async def recent(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            if not category:
                url = self.BASE_URL + "/trending"
            else:
                url = self.BASE_URL + "/cat/{}/{}/".format(
                    str(category).capitalize(), page
                )
            return await self.parser_result(start_time, url, session, page)

    async def search_by_category(self, query, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = self.BASE_URL + "/category-search/{}/{}/{}/".format(
                query, category.capitalize(), page
            )
            return await self.parser_result(start_time, url, session, page, query)
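`x1337.parser_result` behaves differently from the other classes: when a `query` is passed it keeps requesting successive result pages and appending to `results["data"]` until the requested `limit` is reached or a page comes back empty, then truncates to exactly `limit` items. A hedged usage sketch of the category-scoped search built on top of that loop (the category string is whatever the site exposes; the import path assumes the project layout above):

```python
import asyncio

from torrents.x1337 import x1337


async def main():
    client = x1337()
    # Pages are fetched and accumulated under the hood until 25 results are collected
    # (or the site runs out of pages).
    results = await client.search_by_category("linux", "apps", page=1, limit=25)
    if results:
        print(results["total"])


asyncio.run(main())
```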
torrents/your_bittorrent.py ADDED
@@ -0,0 +1,128 @@
import asyncio
import time
import aiohttp
from bs4 import BeautifulSoup
from helper.asyncioPoliciesFix import decorator_asyncio_fix
from helper.html_scraper import Scraper
from constants.base_url import YOURBITTORRENT
from constants.headers import HEADER_AIO


class YourBittorrent:
    def __init__(self):
        self.BASE_URL = YOURBITTORRENT
        self.LIMIT = None

    @decorator_asyncio_fix
    async def _individual_scrap(self, session, url, obj):
        try:
            async with session.get(url, headers=HEADER_AIO) as res:
                html = await res.text(encoding="ISO-8859-1")
                soup = BeautifulSoup(html, "html.parser")
                try:
                    container = soup.select_one("div.card-body.container")
                    poster = (
                        container.find("div")
                        .find_all("div")[0]
                        .find("picture")
                        .find("img")["src"]
                    )
                    clearfix = soup.find("div", class_="clearfix")
                    torrent = clearfix.find("div").find_all("div")[1].find("a")["href"]
                    obj["torrent"] = torrent
                    obj["poster"] = poster
                except:
                    ...
        except:
            return None

    async def _get_torrent(self, result, session, urls):
        tasks = []
        for idx, url in enumerate(urls):
            for obj in result["data"]:
                if obj["url"] == url:
                    task = asyncio.create_task(
                        self._individual_scrap(session, url, result["data"][idx])
                    )
                    tasks.append(task)
        await asyncio.gather(*tasks)
        return result

    def _parser(self, htmls, idx=1):
        try:
            for html in htmls:
                soup = BeautifulSoup(html, "html.parser")
                list_of_urls = []
                my_dict = {"data": []}

                for tr in soup.find_all("tr")[idx:]:
                    td = tr.find_all("td")
                    name = td[1].find("a").get_text(strip=True)
                    url = self.BASE_URL + td[1].find("a")["href"]
                    list_of_urls.append(url)
                    size = td[2].text
                    date = td[3].text
                    seeders = td[4].text
                    leechers = td[5].text
                    my_dict["data"].append(
                        {
                            "name": name,
                            "size": size,
                            "date": date,
                            "seeders": seeders,
                            "leechers": leechers,
                            "url": url,
                        }
                    )
                    if len(my_dict["data"]) == self.LIMIT:
                        break
                return my_dict, list_of_urls
        except:
            return None, None

    async def search(self, query, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = self.BASE_URL + "/?v=&c=&q={}".format(query)
            return await self.parser_result(start_time, url, session, idx=6)

    async def parser_result(self, start_time, url, session, idx=1):
        htmls = await Scraper().get_all_results(session, url)
        result, urls = self._parser(htmls, idx)
        if result is not None:
            results = await self._get_torrent(result, session, urls)
            results["time"] = time.time() - start_time
            results["total"] = len(results["data"])
            return results
        return result

    async def trending(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            idx = None
            if not category:
                url = self.BASE_URL + "/top.html"
                idx = 1
            else:
                if category == "books":
                    category = "ebooks"
                url = self.BASE_URL + f"/{category}.html"
                idx = 4
            return await self.parser_result(start_time, url, session, idx)

    async def recent(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            idx = None
            if not category:
                url = self.BASE_URL + "/new.html"
                idx = 1
            else:
                if category == "books":
                    category = "ebooks"
                url = self.BASE_URL + f"/{category}/latest.html"
                idx = 4
            return await self.parser_result(start_time, url, session, idx)
torrents/yts.py ADDED
@@ -0,0 +1,173 @@
import asyncio
import re
import time
import aiohttp
from bs4 import BeautifulSoup
from helper.asyncioPoliciesFix import decorator_asyncio_fix
from helper.html_scraper import Scraper
from constants.base_url import YTS
from constants.headers import HEADER_AIO


class Yts:
    def __init__(self):
        self.BASE_URL = YTS
        self.LIMIT = None

    @decorator_asyncio_fix
    async def _individual_scrap(self, session, url, obj):
        try:
            async with session.get(url, headers=HEADER_AIO) as res:
                html = await res.text(encoding="ISO-8859-1")
                soup = BeautifulSoup(html, "html.parser")
                try:
                    name = soup.select_one("div.hidden-xs h1").text
                    div = soup.select("div.hidden-xs h2")
                    date = div[0].text
                    genre = div[1].text.split("/")
                    rating = soup.select_one("[itemprop=ratingValue]").text
                    poster = (
                        soup.find("div", id="movie-poster")
                        .find("img")["src"]
                        .split("/")
                    )
                    poster[-1] = poster[-1].replace("medium", "large")
                    poster = "/".join(poster)
                    description = soup.select("div#synopsis > p")[0].text.strip()
                    runtime = (
                        soup.select_one(".tech-spec-info")
                        .find_all("div", class_="row")[-1]
                        .find_all("div")[-3]
                        .text.strip()
                    )

                    screenshots = soup.find_all("a", class_="screenshot-group")
                    screenshots = [a["href"] for a in screenshots]
                    torrents = []
                    for div in soup.find_all("div", class_="modal-torrent"):
                        quality = (
                            div.find("div", class_="modal-quality").find("span").text
                        )
                        all_p = div.find_all("p", class_="quality-size")
                        quality_type = all_p[0].text
                        size = all_p[1].text
                        torrent_link = div.find("a", class_="download-torrent")["href"]
                        magnet = div.find("a", class_="magnet-download")["href"]
                        hash = re.search(r"([{a-f\d,A-F\d}]{32,40})\b", magnet).group(0)
                        torrents.append(
                            {
                                "quality": quality,
                                "type": quality_type,
                                "size": size,
                                "torrent": torrent_link,
                                "magnet": magnet,
                                "hash": hash,
                            }
                        )
                    obj["name"] = name
                    obj["date"] = date
                    obj["genre"] = genre
                    obj["rating"] = rating
                    obj["poster"] = poster
                    obj["description"] = description
                    obj["runtime"] = runtime
                    obj["screenshot"] = screenshots
                    obj["torrents"] = torrents
                except:
                    ...
        except:
            return None

    async def _get_torrent(self, result, session, urls):
        tasks = []
        for idx, url in enumerate(urls):
            for obj in result["data"]:
                if obj["url"] == url:
                    task = asyncio.create_task(
                        self._individual_scrap(session, url, result["data"][idx])
                    )
                    tasks.append(task)
        await asyncio.gather(*tasks)
        return result

    def _parser(self, htmls):
        try:
            for html in htmls:
                soup = BeautifulSoup(html, "html.parser")
                list_of_urls = []
                my_dict = {"data": []}
                for div in soup.find_all("div", class_="browse-movie-wrap"):
                    url = div.find("a")["href"]
                    list_of_urls.append(url)
                    my_dict["data"].append({"url": url})
                    if len(my_dict["data"]) == self.LIMIT:
                        break
                try:
                    ul = soup.find("ul", class_="tsc_pagination")
                    current_page = ul.find("a", class_="current").text
                    my_dict["current_page"] = int(current_page)
                    if current_page:
                        total_results = soup.select_one(
                            "body > div.main-content > div.browse-content > div > h2 > b"
                        ).text
                        if "," in total_results:
                            total_results = total_results.replace(",", "")
                        total_page = int(total_results) / 20
                        my_dict["total_pages"] = (
                            int(total_page) + 1
                            if type(total_page) == float
                            else int(total_page)
                        )

                except:
                    ...
                return my_dict, list_of_urls
        except:
            return None, None

    async def search(self, query, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            if page != 1:
                url = (
                    self.BASE_URL
                    + "/browse-movies/{}/all/all/0/latest/0/all?page={}".format(
                        query, page
                    )
                )
            else:
                url = self.BASE_URL + "/browse-movies/{}/all/all/0/latest/0/all".format(
                    query
                )
            return await self.parser_result(start_time, url, session)

    async def parser_result(self, start_time, url, session):
        htmls = await Scraper().get_all_results(session, url)
        result, urls = self._parser(htmls)
        if result is not None:
            results = await self._get_torrent(result, session, urls)
            results["time"] = time.time() - start_time
            results["total"] = len(results["data"])
            return results
        return result

    async def trending(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = self.BASE_URL + "/trending-movies"
            return await self.parser_result(start_time, url, session)

    async def recent(self, category, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            if page != 1:
                url = (
                    self.BASE_URL
                    + "/browse-movies/0/all/all/0/featured/0/all?page={}".format(page)
                )
            else:
                url = self.BASE_URL + "/browse-movies/0/all/all/0/featured/0/all"
            return await self.parser_result(start_time, url, session)
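YTS results differ in shape from the other scrapers: each entry in `data` is one movie, and the per-quality downloads are nested under a `torrents` list filled in by `_individual_scrap`, rather than being flattened into the top-level record. A hedged sketch of walking that structure:

```python
import asyncio

from torrents.yts import Yts


async def main():
    results = await Yts().search("inception", page=1, limit=2)
    if results:
        for movie in results["data"]:
            # Each movie carries its own list of quality/size/magnet entries.
            for t in movie.get("torrents", []):
                print(movie.get("name"), t["quality"], t["size"])


asyncio.run(main())
```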
torrents/zooqle.py ADDED
@@ -0,0 +1,78 @@
import re
import time
import aiohttp
from bs4 import BeautifulSoup
from helper.html_scraper import Scraper
from constants.base_url import ZOOQLE


class Zooqle:
    def __init__(self):
        self.BASE_URL = ZOOQLE
        self.LIMIT = None

    def _parser(self, htmls):
        try:
            for html in htmls:
                soup = BeautifulSoup(html, "html.parser")

                my_dict = {"data": []}

                for tr in soup.find_all("tr")[1:]:
                    td = tr.find_all("td")
                    name = td[1].find("a").get_text(strip=True)
                    if name != "":
                        magnet = td[2].find_all("a")[1]["href"]
                        try:
                            size = td[3].find_all("div")[1].text
                        except IndexError:
                            size = None
                        url = td[1].find_all("a")[0]["href"]
                        date = td[4].get_text(strip=True)
                        seeders_leechers = td[5].find("div")["title"].split("|")
                        seeders = seeders_leechers[0].replace("Seeders: ", "").strip()
                        leechers = seeders_leechers[1].replace("Leechers: ", "").strip()
                        my_dict["data"].append(
                            {
                                "name": name,
                                "size": size,
                                "seeders": seeders,
                                "leechers": leechers,
                                "hash": re.search(
                                    r"([{a-f\d,A-F\d}]{32,40})\b", magnet
                                ).group(0),
                                "magnet": magnet,
                                "url": self.BASE_URL + url,
                                "date": date,
                            }
                        )
                    if len(my_dict["data"]) == self.LIMIT:
                        break
                try:
                    ul = soup.find("ul", class_="pagination")
                    tpages = ul.find_all("a")[-3].text
                    current_page = (ul.find("li", class_="active")).find("a").text
                    my_dict["current_page"] = int(current_page)
                    my_dict["total_pages"] = int(tpages)
                except:
                    my_dict["current_page"] = None
                    my_dict["total_pages"] = None
                return my_dict
        except:
            return None

    async def search(self, query, page, limit):
        async with aiohttp.ClientSession() as session:
            start_time = time.time()
            self.LIMIT = limit
            url = self.BASE_URL + "/search?pg={1}&q={0}&v=t".format(query, page)
            return await self.parser_result(start_time, url, session)

    async def parser_result(self, start_time, url, session):
        html = await Scraper().get_all_results(session, url)
        results = self._parser(html)
        if results is not None:
            results["time"] = time.time() - start_time
            results["total"] = len(results["data"])
            return results
        return results