limitedonly41 committed
Commit 21059a4 · verified · 1 Parent(s): 2a2e11f

Create app.py

Files changed (1)
app.py +156 -0
app.py ADDED
@@ -0,0 +1,156 @@
+import gradio as gr
+import spaces  # provides the @spaces.GPU decorator used below
+import asyncio
+import urllib.request
+import re
+import torch
+from bs4 import BeautifulSoup
+from deep_translator import GoogleTranslator
+from unsloth import FastLanguageModel
+
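+# Assumed runtime dependencies (not pinned anywhere in this commit):
+#   gradio, spaces, beautifulsoup4, deep-translator, unsloth, torch
+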
+# Define helper functions
+async def fetch_data(url):
+    """Fetch a page and extract its title, meta tags, headings, and paragraph text."""
+    # Browser-like headers make the request less likely to be rejected as a bot.
+    headers = {
+        'Accept': '*/*',
+        'Accept-Language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7',
+        'Connection': 'keep-alive',
+        'Referer': f'{url}',
+        'Sec-Fetch-Dest': 'empty',
+        'Sec-Fetch-Mode': 'cors',
+        'Sec-Fetch-Site': 'cross-site',
+        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36',
+        'sec-ch-ua': '"Google Chrome";v="125", "Chromium";v="125", "Not.A/Brand";v="24"',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-platform': '"macOS"',
+    }
+
+    encoding = 'utf-8'
+    timeout = 10
+
+    try:
+        def get_content():
+            req = urllib.request.Request(url, headers=headers)
+            with urllib.request.urlopen(req, timeout=timeout) as response:
+                return response.read()
+
+        # urllib is blocking, so run it in a worker thread to keep the event loop free.
+        response_content = await asyncio.get_running_loop().run_in_executor(None, get_content)
+
+        soup = BeautifulSoup(response_content, 'html.parser', from_encoding=encoding)
+
+        title = soup.find('title').text
+        description = soup.find('meta', attrs={'name': 'description'})
+        if description and "content" in description.attrs:
+            description = description.get("content")
+        else:
+            description = ""
+
+        keywords = soup.find('meta', attrs={'name': 'keywords'})
+        if keywords and "content" in keywords.attrs:
+            keywords = keywords.get("content")
+        else:
+            keywords = ""
+
+        h1_all = " ".join(h.text for h in soup.find_all('h1'))
+        h2_all = " ".join(h.text for h in soup.find_all('h2'))
+        h3_all = " ".join(h.text for h in soup.find_all('h3'))
+        paragraphs_all = " ".join(p.text for p in soup.find_all('p'))
+
+        allthecontent = f"{title} {description} {h1_all} {h2_all} {h3_all} {paragraphs_all}"
+        allthecontent = allthecontent[:4999]  # cap the combined text length
+
+        return {
+            'url': url,
+            'title': title,
+            'description': description,
+            'keywords': keywords,
+            'h1': h1_all,
+            'h2': h2_all,
+            'h3': h3_all,
+            'paragraphs': paragraphs_all,
+            'text': allthecontent
+        }
+    except Exception:
+        # On any fetch or parse failure, return an empty record for this URL.
+        return {
+            'url': url,
+            'title': None,
+            'description': None,
+            'keywords': None,
+            'h1': None,
+            'h2': None,
+            'h3': None,
+            'paragraphs': None,
+            'text': None
+        }
+
+def concatenate_text(data):
+    """Join the scraped fields into a single whitespace-normalized string."""
+    text_parts = [str(data[col]) for col in ['url', 'title', 'description', 'keywords', 'h1', 'h2', 'h3'] if data[col]]
+    text = ' '.join(text_parts)
+    # '\xa0' (non-breaking space) must not be a raw string, or the literal
+    # characters backslash-x-a-0 would be replaced instead of the character.
+    text = text.replace('\xa0', ' ').replace('\n', ' ').replace('\t', ' ')
+    text = re.sub(r'\s{2,}', ' ', text)
+    return text
+
+def translate_text(text):
+    """Translate text to English; input is truncated below the 5000-character service limit."""
+    try:
+        text = text[:4990]
+        translated_text = GoogleTranslator(source='auto', target='en').translate(text)
+        return translated_text
+    except Exception as e:
+        print(f"An error occurred during translation: {e}")
+        return None
+
+@spaces.GPU()
+def summarize_url(url):
+    """Scrape the URL, translate the text to English, and ask the model for a one-word topic."""
+    # Load the 4-bit quantized model; dtype=None lets unsloth choose automatically.
+    max_seq_length = 2048
+    dtype = None
+    load_in_4bit = True
+
+    model, tokenizer = FastLanguageModel.from_pretrained(
+        model_name="unsloth/mistral-7b-instruct-v0.3-bnb-4bit",
+        max_seq_length=max_seq_length,
+        dtype=dtype,
+        load_in_4bit=load_in_4bit,
+    )
+
+    # Enable native 2x faster inference
+    FastLanguageModel.for_inference(model)
+
+    result = asyncio.run(fetch_data(url))
+    text = concatenate_text(result)
+    translated_text = translate_text(text)
+
+    # Alpaca-style instruction prompt.
+    alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
+
+### Instruction:
+Describe the website text into one word topic:
+
+### Input:
+{}
+
+### Response:
+"""
+
+    prompt = alpaca_prompt.format(translated_text)
+    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
+
+    outputs = model.generate(inputs.input_ids, max_new_tokens=64, use_cache=True)
+    summary = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    # Everything after the "### Response:" marker is the model's answer.
+    final_answer = summary.split("### Response:")[1].strip()
+    return final_answer
+
+# Define Gradio interface
+iface = gr.Interface(
+    fn=summarize_url,
+    inputs="text",
+    outputs="text",
+    title="Website Summary Generator",
+    description="Enter a URL to get a one-word topic summary of the website content.",
+)
+
+# Launch the Gradio app when executed as a script
+if __name__ == "__main__":
+    iface.launch()
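
For a quick local check, the scraping helpers can be exercised without loading the GPU model. A minimal sketch, assuming app.py is importable on the test machine (its heavy imports such as unsloth must still resolve) and using https://example.com as a placeholder URL; this snippet is not part of the commit:

import asyncio
from app import fetch_data, concatenate_text  # importing app does not launch the UI; launch() runs only under __main__

result = asyncio.run(fetch_data("https://example.com"))  # placeholder URL
print(concatenate_text(result))  # the text that would be translated and sent to the model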