import asyncio
import logging

import gradio as gr
import spaces
import torch
from bs4 import BeautifulSoup
from curl_cffi.requests import AsyncSession
from deep_translator import GoogleTranslator
from fake_headers import Headers
from torch.amp import autocast
from tqdm.asyncio import tqdm
from urllib.parse import urlparse, urlunparse


# Limit the number of concurrent workers
CONCURRENT_WORKERS = 5
semaphore = asyncio.Semaphore(CONCURRENT_WORKERS)
# Configure logging to write messages to a file
logging.basicConfig(filename='app.log', level=logging.ERROR)

# Configuration
max_seq_length = 2048
dtype = None  # Auto detection of dtype
load_in_4bit = True  # Use 4-bit quantization to reduce memory usage

# peft_model_name = "limitedonly41/website_qwen2_7b_2"
# peft_model_name = "limitedonly41/website_mistral7b_v02"
peft_model_name = "unsloth/mistral-7b-instruct-v0.3-bnb-4bit"

# Initialize model and tokenizer variables
model = None
tokenizer = None



def get_main_page_url(url):
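    """Return the scheme://host root of `url`; on failure, return an error message string."""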

    try:
        # Parse the given URL
        parsed_url = urlparse(url)
        
        # Construct the main page URL (scheme + netloc)
        
        print(parsed_url.netloc)
        main_page_url = urlunparse((parsed_url.scheme, parsed_url.netloc, '', '', '', ''))
        
        return main_page_url
    except Exception as e:
        return f"Error processing URL: {e}"


def translate_text(text):
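    """Translate `text` (truncated to 4990 chars) to English via Google Translate; return None on failure."""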
    try:
        text = text[:4990]  # Limit the text length to avoid API errors
        translated_text = GoogleTranslator(source='auto', target='en').translate(text)
        return translated_text
    except Exception as e:
        print(f"An error occurred during translation: {e}")
        return None

async def get_page_bs4(url: str, headers):
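    """Fetch `url` with curl_cffi (impersonating Chrome) and extract title, meta tags,
    headings, paragraphs, and menu link texts with BeautifulSoup.

    Returns a dict of those fields plus a combined `text` blob; on failure the dict
    holds the url and None values.
    """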

    wrong_result = {
        'url': None,
        'title': None,
        'description': None,
        'keywords': None,
        'h1': None,
        'h2': None,
        'h3': None,
        'paragraphs': None,
        'text': None,
        'links': None
    }

    async with semaphore:  # Limit concurrency
        async with AsyncSession() as session:

            wrong_result['url'] = url

            try:
                response = await session.get(url, headers=headers, impersonate="chrome", timeout=60, verify=False)
            except:
                try:
                    response = await session.get(url, impersonate="chrome", timeout=60, verify=False)
                except:
                    return wrong_result

            if response.status_code != 200:
                return wrong_result
            soup = BeautifulSoup(response.text, "html.parser")

            try:
                title = soup.find('title').text if soup.find('title') else ''
            except:
                title = ''
            try:
                description = soup.find('meta', attrs={'name': 'description'})
                description = description.get("content") if description else ''
            except:
                description = ''
            try:
                keywords = soup.find('meta', attrs={'name': 'keywords'})
                keywords = keywords.get("content") if keywords else ''
            except:
                keywords = ''
            try:
                h1 = " ".join(h.text for h in soup.find_all('h1'))
            except:
                h1 = ''
            try:
                h2 = " ".join(h.text for h in soup.find_all('h2'))
            except:
                h2 = ''
            try:
                h3 = " ".join(h.text for h in soup.find_all('h3'))
            except:
                h3 = ''
            try:
                paragraphs = " ".join(p.text for p in soup.find_all('p'))
            except:
                paragraphs = ''
            try:
                menu_tags = []
                navs = soup.find_all('nav')
                uls = soup.find_all('ul')
                ols = soup.find_all('ol')
                for tag in navs + uls + ols:
                    menu_tags.extend(tag.find_all('a'))
                menu_items = [{'text': tag.get_text(strip=True), 'href': tag.get('href')} for tag in menu_tags if tag.get_text(strip=True)]
                all_menu_texts = ', '.join([item['text'] for item in menu_items])
            except:
                all_menu_texts = ''

            # all_content = f"{url} {title} {description} {h1} {h2} {h3} {paragraphs}"[:4999]

            all_content = f" {url} {title} {description} {h1} {h2} {h3} {paragraphs} "[:4999]

            if len(all_content) < 150:
                all_content = f" {url} {title} {description} {h1} {h2} {h3} {paragraphs} {all_menu_texts}"[:4999]


            # all_content = f" {url} {title} {description} {keywords} {h1} {h2} {h3} {paragraphs} "[:4999]

            # all_content = f" url: {url} title: {title} description: {description} keywords: {keywords} h1: {h1} h2: {h2} h3: {h3} p: {paragraphs} links: {all_menu_texts}"[:4999]


            result = {
                'url': url,
                'title': title,
                'description': description,
                'keywords': keywords,
                'h1': h1,
                'h2': h2,
                'h3': h3,
                'paragraphs': paragraphs,
                'text': all_content,
                'links': all_menu_texts
            }

            return result


async def main(urls_list):
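    """Scrape all URLs concurrently, rotating a freshly generated fake Chrome header set every 5 URLs."""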

    headers_list = [Headers(browser="chrome", os="win").generate() for _ in range(len(urls_list) // 5 + 1)]
    tasks = []

    # Assign headers to each task, rotating every 5 URLs
    for i, url in enumerate(urls_list):
        headers = headers_list[i // 5]  # Rotate headers every 5 URLs
        tasks.append(get_page_bs4(url, headers))

    # Use tqdm to show progress
    results = []
    for coro in tqdm(asyncio.as_completed(tasks), total=len(tasks)):
        results.append(await coro)
    return results

def scrape_websites(urls_list):
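    """Synchronous wrapper around main(): reuse the current event loop via nest_asyncio,
    falling back to asyncio.run() when that is not possible."""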

    try:
        import nest_asyncio
        nest_asyncio.apply()
        loop = asyncio.get_event_loop()
        result_data = loop.run_until_complete(main(urls_list))
        # print(len(result_data))
    except (ImportError, RuntimeError):
        result_data = asyncio.run(main(urls_list))

    return result_data


@spaces.GPU()
def classify_website(url):
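    """Load the 4-bit model on first call, scrape the site's main page, translate it to
    English, and return a one-line topic summary generated from the Alpaca-style prompt."""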
    from unsloth import FastLanguageModel  # Imported lazily so unsloth initializes inside the GPU process

    global model, tokenizer  # Declare model and tokenizer as global variables

    if model is None or tokenizer is None:
    
        # Load the model and tokenizer during initialization (in the main process)
        model, tokenizer = FastLanguageModel.from_pretrained(
            model_name=peft_model_name,
            max_seq_length=max_seq_length,
            dtype=dtype,
            load_in_4bit=load_in_4bit,
        )
        FastLanguageModel.for_inference(model)  # Enable native 2x faster inference


    main_page_url = get_main_page_url(url)
    
    urls = [main_page_url]
    
    print('before scrape_websites')
    result_data = scrape_websites(urls)

    data = result_data[0]

    url = data['url']
    text = data['text']

    # Bail out early if the page yielded no text or too little of it.
    try:
        if len(text) < 150:
            return 'Short'
    except TypeError:
        return 'NotScraped'

    translated = translate_text(text)

    try:
        if len(translated) < 150:
            return 'Short'
    except TypeError:
        # Translation failed and returned None.
        return 'NotScraped'


    example_input = """https://extensionesdepelo.net/ Hair extensions in Valencia ▶ The best prices for natural hair extensions in Valencia Hair Extensions in Valencia ▶ Professional and Natural ⭐ Hair with more volume and length. Perfect Hair Extensions About us Our works Our salon services Hair extensions Hair removal Reviews of satisfied customers Hair palette colors Contacts Fill out the form Over 7 years of experience in hair extensions, we select the color and texture of hair to match your hair so that the hair extensions look natural Gentle and safe hair extensions so that your hair does not suffer. In a few hours, we will transform rare, weak and short hair into luxurious long and healthy hair. We work exclusively with high-quality hair. Thanks to micro and nano capsules, the extensions will be invisible and comfortable. Free consultation before each extension. We use high-quality hair, time-tested

We use small, neat, comfortable
capsules and make an unnoticeable transition
We consult
and answer all
questions before and after extensions
Safe extensions without discomfort in wearing. Due to the correct placement of the capsules, the result of the extension is invisible.  A procedure that requires the attention and accuracy of the master. With proper hair removal, the structure of native hair is not damaged We provide a large selection of colors Ask the master a question and we will answer all your questions We work in the hot Italian extension technique. This technique is the most comfortable because it does not require much self-care. We recommend doing a correction every 2-3 months. With the Italian technique, you can do various hairstyles and even make ponytails. To form capsules, we use good refractory keratin.  We work with a proven supplier of natural Slavic hair. We have a large selection of colors, lengths and hair structures."""



    alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
Describe the topic of website from its text :

### ExampleInput:
{}

### ExampleResponse: The website of the master of hair extensions.

### Input:
{}

### Response:"""

    prompt = alpaca_prompt.format(example_input,translated)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    with autocast(device_type=device.type):
        inputs = tokenizer(prompt, return_tensors="pt").to(device)
        outputs = model.generate(**inputs, max_new_tokens=128, use_cache=True)

    # inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
    # outputs = model.generate(inputs.input_ids, max_new_tokens=64, use_cache=True)

    summary = tokenizer.decode(outputs[0], skip_special_tokens=True)
    final_answer = summary.split("### Response:")[1].strip()
    return f"{main_page_url}: {final_answer}"

# Create a Gradio interface
iface = gr.Interface(
    fn=classify_website,
    inputs="text",
    outputs="text",
    title="Website Topic",
    description="Enter a URL to get a topic summary of the website content."
)

# Launch the interface
iface.launch()