Update app.py
app.py CHANGED

@@ -392,13 +392,21 @@ async def adv_web_search(
     """
     try:
         with WEBS(proxy=proxy) as webs:
-
-            extracted_text = str(web_search_and_extract_threading(q=q, region=region,
+            search_results = webs.text(keywords=q, region=region,
                                        safesearch=safesearch,
                                        timelimit=timelimit, backend=backend,
-                                       max_results=max_results))
+                                       max_results=max_results)
+
+            # 2. Extract text from top search result URLs asynchronously
+            extracted_text = ""
+            tasks = [fetch_and_extract(result['href'], 6000, proxy) for result in search_results if 'href' in result]
+            extracted_results = await asyncio.gather(*tasks)
+            for result in extracted_results:
+                if result['text'] and len(extracted_text) < max_chars:
+                    extracted_text += f"## Content from: {result['link']}\n\n{result['text']}\n\n"
+
+            extracted_text = extracted_text[:max_chars]
 
-            extracted_text = extracted_text[:max_chars]
 
        # 3. Construct the prompt for the chat model
        ai_prompt = (