# app.py
import gradio as gr
from bs4 import BeautifulSoup
from sentence_transformers import SentenceTransformer
import faiss
import numpy as np
import asyncio
import aiohttp
import re
import base64
import logging
import os
import sys
import time
# Import OpenAI library
import openai
# Set up logging to output to the console
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Create a console handler
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
# Create a formatter and set it for the handler
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')
console_handler.setFormatter(formatter)
# Add the handler to the logger
logger.addHandler(console_handler)
# Initialize models and variables
logger.info("Initializing models and variables")
embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
faiss_index = None
bookmarks = []
fetch_cache = {}
# Define the categories
CATEGORIES = [
"Social Media",
"News and Media",
"Education and Learning",
"Entertainment",
"Shopping and E-commerce",
"Finance and Banking",
"Technology",
"Health and Fitness",
"Travel and Tourism",
"Food and Recipes",
"Sports",
"Arts and Culture",
"Government and Politics",
"Business and Economy",
"Science and Research",
"Personal Blogs and Journals",
"Job Search and Careers",
"Music and Audio",
"Videos and Movies",
"Reference and Knowledge Bases",
"Dead Link",
"Uncategorized",
]
# Set up the Groq Cloud API key and base URL
GROQ_API_KEY = os.getenv('GROQ_API_KEY')
if not GROQ_API_KEY:
    logger.error("GROQ_API_KEY environment variable not set.")
# Point the (pre-1.0) OpenAI library at the Groq Cloud API
openai.api_key = GROQ_API_KEY
openai.api_base = "https://api.groq.com/openai/v1"
def extract_retry_after(error_message):
    """
    Extract the retry-after time from the rate limit error message.
    """
    match = re.search(r'Please try again in (\d+\.?\d*)s', error_message)
    if match:
        return float(match.group(1)) + 1  # Add a buffer of 1 second
    else:
        return 5  # Default retry after 5 seconds
def exponential_backoff(retries):
    return min(60, 2 ** retries)  # Cap the wait time at 60 seconds
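# Sketch of how the two retry helpers interact (the error string is a hypothetical
# example of Groq's rate-limit message format, not a captured response):
#   extract_retry_after("... Please try again in 2.5s ...")  # -> 3.5 (2.5 + 1s buffer)
#   extract_retry_after("unrecognized message")              # -> 5 (default)
#   [exponential_backoff(r) for r in range(7)]               # -> [1, 2, 4, 8, 16, 32, 60]
# Since extract_retry_after always returns a truthy value, the
# `extract_retry_after(...) or exponential_backoff(...)` pattern used below in effect
# prefers the parsed wait time and keeps the backoff only as a defensive fallback.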
def extract_main_content(soup):
    """
    Extract the main content from a webpage while filtering out boilerplate content.
    """
    if not soup:
        return ""
    # Remove unwanted elements
    for element in soup(['script', 'style', 'header', 'footer', 'nav', 'aside', 'form', 'noscript']):
        element.decompose()
    # Extract text from <p> tags
    p_tags = soup.find_all('p')
    if p_tags:
        content = ' '.join(p.get_text(strip=True, separator=' ') for p in p_tags)
    else:
        # Fall back to the full body text
        content = soup.get_text(separator=' ', strip=True)
    # Collapse runs of whitespace into single spaces
    content = re.sub(r'\s+', ' ', content)
    # Truncate content to a reasonable length (e.g., 1500 words)
    words = content.split()
    if len(words) > 1500:
        content = ' '.join(words[:1500])
    return content
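# Minimal illustration of extract_main_content on a hypothetical page (boilerplate
# tags are stripped, <p> text is preferred, and whitespace is collapsed):
#   soup = BeautifulSoup(
#       "<html><body><nav>menu</nav><p>Hello   world</p><p>Second  para</p></body></html>",
#       'html.parser')
#   extract_main_content(soup)  # -> "Hello world Second para"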
def get_page_metadata(soup):
    """
    Extract metadata from the webpage, including title, description, and keywords.
    """
    metadata = {
        'title': '',
        'description': '',
        'keywords': ''
    }
    if not soup:
        return metadata
    # Get title
    title_tag = soup.find('title')
    if title_tag and title_tag.string:
        metadata['title'] = title_tag.string.strip()
    # Get meta description, falling back to Open Graph and Twitter variants
    meta_desc = (
        soup.find('meta', attrs={'name': 'description'}) or
        soup.find('meta', attrs={'property': 'og:description'}) or
        soup.find('meta', attrs={'name': 'twitter:description'})
    )
    if meta_desc:
        metadata['description'] = meta_desc.get('content', '').strip()
    # Get meta keywords
    meta_keywords = soup.find('meta', attrs={'name': 'keywords'})
    if meta_keywords:
        metadata['keywords'] = meta_keywords.get('content', '').strip()
    # Fall back to the Open Graph title if the main title is empty
    if not metadata['title']:
        og_title = soup.find('meta', attrs={'property': 'og:title'})
        if og_title:
            metadata['title'] = og_title.get('content', '').strip()
    return metadata
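# Illustration with a hypothetical document: given
#   <meta property="og:title" content="Example Site">
#   <meta name="description" content="A demo page.">
# and no <title> tag, get_page_metadata returns
#   {'title': 'Example Site', 'description': 'A demo page.', 'keywords': ''}
# because the Open Graph title is used only as a fallback.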
async def generate_summary_async(bookmark, semaphore):
    # Run the blocking LLM call in a thread, gated by the shared semaphore
    async with semaphore:
        await asyncio.get_running_loop().run_in_executor(None, generate_summary, bookmark)
def generate_summary(bookmark):
    """
    Generate a concise summary for a bookmark using available content and the LLM via the Groq Cloud API.
    """
    logger.info(f"Generating summary for bookmark: {bookmark.get('url')}")
    try:
        html_content = bookmark.get('html_content', '')
        # Get the HTML soup object from the bookmark
        soup = BeautifulSoup(html_content, 'html.parser')
        # Extract metadata and main content
        metadata = get_page_metadata(soup)
        main_content = extract_main_content(soup)
        # Prepare content for the prompt
        content_parts = []
        if metadata['title']:
            content_parts.append(f"Title: {metadata['title']}")
        if metadata['description']:
            content_parts.append(f"Description: {metadata['description']}")
        if metadata['keywords']:
            content_parts.append(f"Keywords: {metadata['keywords']}")
        if main_content:
            content_parts.append(f"Main Content: {main_content}")
        content_text = '\n'.join(content_parts)
        # Detect insufficient or erroneous content
        error_keywords = ['Access Denied', 'Security Check', 'Cloudflare', 'captcha', 'unusual traffic']
        if not content_text or len(content_text.split()) < 50:
            use_prior_knowledge = True
            logger.info(f"Content for {bookmark.get('url')} is insufficient. Instructing LLM to use prior knowledge.")
        elif any(keyword.lower() in content_text.lower() for keyword in error_keywords):
            use_prior_knowledge = True
            logger.info(f"Content for {bookmark.get('url')} contains error messages. Instructing LLM to use prior knowledge.")
        else:
            use_prior_knowledge = False
        if use_prior_knowledge:
            # Construct a prompt that relies on the model's prior knowledge
            prompt = f"""
You are a knowledgeable assistant.
The user provided a URL: {bookmark.get('url')}
Please provide a concise summary in **no more than two sentences** about this website based on your knowledge.
Focus on:
- The main purpose or topic of the website.
- Key information or features.
Be concise and objective.
"""
        else:
            # Construct the prompt with the extracted content
            prompt = f"""
You are a helpful assistant that creates concise webpage summaries.
Analyze the following webpage content:
{content_text}
Provide a concise summary in **no more than two sentences** focusing on:
- The main purpose or topic of the page.
- Key information or features.
Be concise and objective.
"""
        # Call the LLM via the Groq Cloud API, retrying on rate limits
        retries = 0
        max_retries = 5
        while retries <= max_retries:
            try:
                response = openai.ChatCompletion.create(
                    model='llama-3.1-70b-versatile',
                    messages=[
                        {"role": "user", "content": prompt}
                    ],
                    max_tokens=100,  # Keep summaries short
                    temperature=0.5,
                )
                break  # Exit loop if successful
            except openai.error.RateLimitError as e:
                retry_after = extract_retry_after(str(e)) or exponential_backoff(retries)
                logger.warning(f"Rate limit exceeded. Retrying after {retry_after} seconds.")
                time.sleep(retry_after)
                retries += 1
            except Exception as e:
                logger.error(f"Error generating summary: {e}", exc_info=True)
                bookmark['summary'] = 'No summary available.'
                return bookmark
        else:
            # Retries exhausted without a successful response; bail out instead of
            # referencing an unbound `response`
            logger.error(f"Rate limit retries exhausted for {bookmark.get('url')}")
            bookmark['summary'] = 'No summary available.'
            return bookmark
        summary = response['choices'][0]['message']['content'].strip()
        if not summary:
            raise ValueError("Empty summary received from the model.")
        logger.info("Successfully generated LLM summary")
        bookmark['summary'] = summary
        return bookmark
    except Exception as e:
        logger.error(f"Error generating summary: {e}", exc_info=True)
        bookmark['summary'] = 'No summary available.'
        return bookmark
async def assign_category_async(bookmark, semaphore):
    # Run the blocking LLM call in a thread, gated by the shared semaphore
    async with semaphore:
        await asyncio.get_running_loop().run_in_executor(None, assign_category, bookmark)
def assign_category(bookmark):
    """
    Assign a category to a bookmark using the LLM based on its summary via the Groq Cloud API.
    """
    if bookmark.get('dead_link'):
        bookmark['category'] = 'Dead Link'
        logger.info(f"Assigned category 'Dead Link' to bookmark: {bookmark.get('url')}")
        return bookmark
    summary = bookmark.get('summary', '')
    if not summary:
        bookmark['category'] = 'Uncategorized'
        return bookmark
    # Prepare the prompt
    categories_str = ', '.join(f'"{cat}"' for cat in CATEGORIES if cat != 'Dead Link')
    prompt = f"""
You are a helpful assistant that categorizes webpages.
Based on the following summary, assign the most appropriate category from the list below.
Summary:
{summary}
Categories:
{categories_str}
Respond with only the category name.
"""
    retries = 0
    max_retries = 5
    while retries <= max_retries:
        try:
            response = openai.ChatCompletion.create(
                model='llama-3.1-70b-versatile',
                messages=[
                    {"role": "user", "content": prompt}
                ],
                max_tokens=10,
                temperature=0,
            )
            break  # Exit loop if successful
        except openai.error.RateLimitError as e:
            retry_after = extract_retry_after(str(e)) or exponential_backoff(retries)
            logger.warning(f"Rate limit exceeded. Retrying after {retry_after} seconds.")
            time.sleep(retry_after)
            retries += 1
        except Exception as e:
            logger.error(f"Error assigning category: {e}", exc_info=True)
            bookmark['category'] = 'Uncategorized'
            return bookmark
    else:
        # Retries exhausted without a successful response
        logger.error(f"Rate limit retries exhausted while categorizing: {bookmark.get('url')}")
        bookmark['category'] = 'Uncategorized'
        return bookmark
    category = response['choices'][0]['message']['content'].strip().strip('"')
    # Validate the category against the known list
    if category in CATEGORIES:
        bookmark['category'] = category
        logger.info(f"Assigned category '{category}' to bookmark: {bookmark.get('url')}")
    else:
        bookmark['category'] = 'Uncategorized'
        logger.warning(f"Invalid category '{category}' returned by LLM for bookmark: {bookmark.get('url')}")
    return bookmark
def parse_bookmarks(file_content):
    """
    Parse bookmarks from an HTML file.
    """
    logger.info("Parsing bookmarks")
    try:
        soup = BeautifulSoup(file_content, 'html.parser')
        extracted_bookmarks = []
        for link in soup.find_all('a'):
            url = link.get('href')
            title = link.text.strip()
            if url and title:
                extracted_bookmarks.append({'url': url, 'title': title})
        logger.info(f"Extracted {len(extracted_bookmarks)} bookmarks")
        return extracted_bookmarks
    except Exception as e:
        logger.error("Error parsing bookmarks: %s", e, exc_info=True)
        raise
async def fetch_url_info(session, bookmark):
    """
    Fetch information about a URL asynchronously.
    """
    url = bookmark['url']
    if url in fetch_cache:
        bookmark.update(fetch_cache[url])
        return bookmark
    max_retries = 0  # No retries
    retries = 0
    timeout_duration = 5  # Reduced timeout
    while retries <= max_retries:
        try:
            logger.info(f"Fetching URL info for: {url} (Attempt {retries + 1})")
            headers = {
                'User-Agent': 'Mozilla/5.0',
                'Accept-Language': 'en-US,en;q=0.9',
            }
            async with session.get(url, timeout=timeout_duration, headers=headers, ssl=False, allow_redirects=True) as response:
                bookmark['etag'] = response.headers.get('ETag', 'N/A')
                bookmark['status_code'] = response.status
                content = await response.text()
                logger.info(f"Fetched content length for {url}: {len(content)} characters")
                # Handle status codes
                if response.status >= 500:
                    # Server error; treat as a dead link
                    bookmark['dead_link'] = True
                    bookmark['description'] = ''
                    bookmark['html_content'] = ''
                    logger.warning(f"Dead link detected: {url} with status {response.status}")
                else:
                    bookmark['dead_link'] = False
                    bookmark['html_content'] = content
                    bookmark['description'] = ''
                    logger.info(f"Fetched information for {url}")
            break  # Exit loop if successful
        except asyncio.exceptions.TimeoutError:
            bookmark['dead_link'] = False  # Mark as 'Unknown' instead of 'Dead'
            bookmark['etag'] = 'N/A'
            bookmark['status_code'] = 'Timeout'
            bookmark['description'] = ''
            bookmark['html_content'] = ''
            bookmark['slow_link'] = True  # Custom flag to indicate a slow response
            logger.warning(f"Timeout while fetching {url}. Marking as 'Slow'.")
            break  # Exit loop after timeout
        except Exception as e:
            bookmark['dead_link'] = True
            bookmark['etag'] = 'N/A'
            bookmark['status_code'] = 'Error'
            bookmark['description'] = ''
            bookmark['html_content'] = ''
            logger.error(f"Error fetching URL info for {url}: {e}", exc_info=True)
            break
        finally:
            fetch_cache[url] = {
                'etag': bookmark.get('etag'),
                'status_code': bookmark.get('status_code'),
                'dead_link': bookmark.get('dead_link'),
                'description': bookmark.get('description'),
                'html_content': bookmark.get('html_content', ''),
                'slow_link': bookmark.get('slow_link', False),
            }
    return bookmark
async def process_bookmarks_async(bookmarks_list):
    """
    Fetch all bookmarks asynchronously.
    """
    logger.info("Processing bookmarks asynchronously")
    try:
        connector = aiohttp.TCPConnector(limit=10)  # Increase limit if necessary
        timeout = aiohttp.ClientTimeout(total=60)  # Overall session timeout
        async with aiohttp.ClientSession(connector=connector, timeout=timeout) as session:
            tasks = [asyncio.ensure_future(fetch_url_info(session, bookmark))
                     for bookmark in bookmarks_list]
            await asyncio.gather(*tasks)
        logger.info("Completed processing bookmarks asynchronously")
    except Exception as e:
        logger.error(f"Error in asynchronous processing of bookmarks: {e}", exc_info=True)
        raise
async def process_bookmarks_llm(bookmarks_list):
    """
    Process bookmarks asynchronously for LLM API calls.
    """
    logger.info("Processing bookmarks with LLM asynchronously")
    # Create the semaphore here so it binds to the event loop started by asyncio.run();
    # it limits concurrent LLM API calls (adjust based on allowed concurrency)
    llm_semaphore = asyncio.Semaphore(3)
    tasks = [generate_summary_async(bookmark, llm_semaphore) for bookmark in bookmarks_list]
    await asyncio.gather(*tasks)
    tasks = [assign_category_async(bookmark, llm_semaphore) for bookmark in bookmarks_list]
    await asyncio.gather(*tasks)
    logger.info("Completed LLM processing of bookmarks")
def vectorize_and_index(bookmarks_list):
    """
    Create vector embeddings for bookmarks and build a FAISS index with ID mapping.
    """
    logger.info("Vectorizing summaries and building FAISS index")
    try:
        summaries = [bookmark['summary'] for bookmark in bookmarks_list]
        embeddings = embedding_model.encode(summaries)
        dimension = embeddings.shape[1]
        index = faiss.IndexIDMap(faiss.IndexFlatL2(dimension))
        # Assign each bookmark's unique ID to its vector
        ids = np.array([bookmark['id'] for bookmark in bookmarks_list], dtype=np.int64)
        index.add_with_ids(np.array(embeddings).astype('float32'), ids)
        logger.info("FAISS index built successfully with IDs")
        return index
    except Exception as e:
        logger.error(f"Error in vectorizing and indexing: {e}", exc_info=True)
        raise
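# Query sketch (illustrative, mirroring chatbot_response below): because the index is
# wrapped in IndexIDMap, search results are bookmark 'id' values rather than row
# positions, so they stay valid after remove_ids() deletes vectors:
#   query = embedding_model.encode(["articles about AI"]).astype('float32')
#   distances, ids = index.search(query, 5)  # ids[0] holds bookmark IDs; -1 pads
#                                            # when fewer than 5 vectors exist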
def display_bookmarks():
    """
    Generate HTML display for bookmarks.
    """
    logger.info("Generating HTML display for bookmarks")
    from html import escape  # Escape HTML content to prevent XSS attacks
    cards = ''
    for i, bookmark in enumerate(bookmarks):
        index = i + 1
        if bookmark.get('dead_link'):
            status = "❌ Dead Link"
            card_style = "border: 2px solid var(--error-color);"
            text_style = "color: var(--error-color);"
        elif bookmark.get('slow_link'):
            status = "⏳ Slow Response"
            card_style = "border: 2px solid orange;"
            text_style = "color: orange;"
        else:
            status = "✅ Active"
            card_style = "border: 2px solid var(--success-color);"
            text_style = "color: var(--text-color);"
        title = escape(bookmark['title'])
        url = escape(bookmark['url'])
        etag = bookmark.get('etag', 'N/A')
        summary = escape(bookmark.get('summary', ''))
        category = escape(bookmark.get('category', 'Uncategorized'))
        card_html = f'''
<div class="card" style="{card_style}; padding: 10px; margin: 10px; border-radius: 5px;">
    <div class="card-content">
        <h3 style="{text_style}">{index}. {title} {status}</h3>
        <p style="{text_style}"><strong>Category:</strong> {category}</p>
        <p style="{text_style}"><strong>URL:</strong> <a href="{url}" target="_blank" style="{text_style}">{url}</a></p>
        <p style="{text_style}"><strong>ETag:</strong> {etag}</p>
        <p style="{text_style}"><strong>Summary:</strong> {summary}</p>
    </div>
</div>
'''
        cards += card_html
    logger.info("HTML display generated")
    return cards
def process_uploaded_file(file):
    """
    Process the uploaded bookmarks file.
    """
    global bookmarks, faiss_index
    logger.info("Processing uploaded file")
    if file is None:
        logger.warning("No file uploaded")
        return "Please upload a bookmarks HTML file.", '', gr.update(choices=[]), display_bookmarks()
    try:
        file_content = file.decode('utf-8')
    except UnicodeDecodeError as e:
        logger.error(f"Error decoding the file: {e}", exc_info=True)
        return "Error decoding the file. Please ensure it's a valid HTML file.", '', gr.update(choices=[]), display_bookmarks()
    try:
        bookmarks = parse_bookmarks(file_content)
    except Exception as e:
        logger.error(f"Error parsing bookmarks: {e}", exc_info=True)
        return "Error parsing the bookmarks HTML file.", '', gr.update(choices=[]), display_bookmarks()
    if not bookmarks:
        logger.warning("No bookmarks found in the uploaded file")
        return "No bookmarks found in the uploaded file.", '', gr.update(choices=[]), display_bookmarks()
    # Assign unique IDs to bookmarks
    for idx, bookmark in enumerate(bookmarks):
        bookmark['id'] = idx
    # Asynchronously fetch bookmark info
    try:
        asyncio.run(process_bookmarks_async(bookmarks))
    except Exception as e:
        logger.error(f"Error processing bookmarks asynchronously: {e}", exc_info=True)
        return "Error processing bookmarks.", '', gr.update(choices=[]), display_bookmarks()
    # Asynchronously process bookmarks with the LLM
    try:
        asyncio.run(process_bookmarks_llm(bookmarks))
    except Exception as e:
        logger.error(f"Error processing bookmarks with LLM: {e}", exc_info=True)
        return "Error processing bookmarks with LLM.", '', gr.update(choices=[]), display_bookmarks()
    try:
        faiss_index = vectorize_and_index(bookmarks)
    except Exception as e:
        logger.error(f"Error building FAISS index: {e}", exc_info=True)
        return "Error building search index.", '', gr.update(choices=[]), display_bookmarks()
    message = f"✅ Successfully processed {len(bookmarks)} bookmarks."
    logger.info(message)
    # Generate displays and updates
    bookmark_html = display_bookmarks()
    choices = [f"{i+1}. {bookmark['title']} (Category: {bookmark['category']})"
               for i, bookmark in enumerate(bookmarks)]
    return message, bookmark_html, gr.update(choices=choices), bookmark_html
def delete_selected_bookmarks(selected_indices):
    """
    Delete selected bookmarks and remove their vectors from the FAISS index.
    """
    global bookmarks, faiss_index
    if not selected_indices:
        return "⚠️ No bookmarks selected.", gr.update(choices=[]), display_bookmarks()
    ids_to_delete = []
    indices_to_delete = []
    for s in selected_indices:
        idx = int(s.split('.')[0]) - 1
        if 0 <= idx < len(bookmarks):
            bookmark_id = bookmarks[idx]['id']
            ids_to_delete.append(bookmark_id)
            indices_to_delete.append(idx)
            logger.info(f"Deleting bookmark at index {idx + 1}")
    # Remove vectors from the FAISS index
    if faiss_index is not None and ids_to_delete:
        faiss_index.remove_ids(np.array(ids_to_delete, dtype=np.int64))
    # Remove bookmarks from the list (reverse order to avoid index shifting)
    for idx in sorted(indices_to_delete, reverse=True):
        bookmarks.pop(idx)
    message = "🗑️ Selected bookmarks deleted successfully."
    logger.info(message)
    choices = [f"{i+1}. {bookmark['title']} (Category: {bookmark['category']})"
               for i, bookmark in enumerate(bookmarks)]
    return message, gr.update(choices=choices), display_bookmarks()
def edit_selected_bookmarks_category(selected_indices, new_category):
    """
    Edit the category of selected bookmarks.
    """
    if not selected_indices:
        return "⚠️ No bookmarks selected.", gr.update(choices=[]), display_bookmarks()
    if not new_category:
        return "⚠️ No new category selected.", gr.update(choices=[]), display_bookmarks()
    indices = [int(s.split('.')[0]) - 1 for s in selected_indices]
    for idx in indices:
        if 0 <= idx < len(bookmarks):
            bookmarks[idx]['category'] = new_category
            logger.info(f"Updated category for bookmark {idx + 1} to {new_category}")
    message = "✏️ Category updated for selected bookmarks."
    logger.info(message)
    # Update choices and display
    choices = [f"{i+1}. {bookmark['title']} (Category: {bookmark['category']})"
               for i, bookmark in enumerate(bookmarks)]
    return message, gr.update(choices=choices), display_bookmarks()
def export_bookmarks():
    """
    Export bookmarks to an HTML file.
    """
    if not bookmarks:
        logger.warning("No bookmarks to export")
        return "⚠️ No bookmarks to export."
    try:
        logger.info("Exporting bookmarks to HTML")
        soup = BeautifulSoup("<!DOCTYPE NETSCAPE-Bookmark-file-1><Title>Bookmarks</Title><H1>Bookmarks</H1>", 'html.parser')
        dl = soup.new_tag('DL')
        for bookmark in bookmarks:
            dt = soup.new_tag('DT')
            a = soup.new_tag('A', href=bookmark['url'])
            a.string = bookmark['title']
            dt.append(a)
            dl.append(dt)
        soup.append(dl)
        html_content = str(soup)
        # Embed the file in a data URI so it can be downloaded directly
        b64 = base64.b64encode(html_content.encode()).decode()
        href = f'data:text/html;base64,{b64}'
        logger.info("Bookmarks exported successfully")
        return f'<a href="{href}" download="bookmarks.html">💾 Download Exported Bookmarks</a>'
    except Exception as e:
        logger.error(f"Error exporting bookmarks: {e}", exc_info=True)
        return "⚠️ Error exporting bookmarks."
def chatbot_response(user_query):
    """
    Generate a chatbot response using the FAISS index and embeddings.
    """
    if not bookmarks or faiss_index is None:
        logger.warning("No bookmarks available for chatbot")
        return "⚠️ No bookmarks available. Please upload and process your bookmarks first."
    logger.info(f"Chatbot received query: {user_query}")
    try:
        # Encode the user query
        query_vector = embedding_model.encode([user_query]).astype('float32')
        # Search the FAISS index
        k = 5  # Number of results to return
        distances, ids = faiss_index.search(query_vector, k)
        ids = ids.flatten()
        # Retrieve the matching bookmarks by ID
        id_to_bookmark = {bookmark['id']: bookmark for bookmark in bookmarks}
        matching_bookmarks = [id_to_bookmark.get(id) for id in ids if id in id_to_bookmark]
        if not matching_bookmarks:
            return "No relevant bookmarks found for your query."
        # Format the retrieved bookmarks for the prompt
        bookmarks_info = "\n".join(
            f"Title: {bookmark['title']}\nURL: {bookmark['url']}\nSummary: {bookmark['summary']}"
            for bookmark in matching_bookmarks
        )
        # Use the LLM via the Groq Cloud API to generate a response
        prompt = f"""
A user asked: "{user_query}"
Based on the bookmarks below, provide a helpful answer to the user's query, referencing the relevant bookmarks.
Bookmarks:
{bookmarks_info}
Provide a concise and helpful response.
"""
        retries = 0
        max_retries = 5
        while retries <= max_retries:
            try:
                response = openai.ChatCompletion.create(
                    model='llama-3.1-70b-versatile',
                    messages=[
                        {"role": "user", "content": prompt}
                    ],
                    max_tokens=500,
                    temperature=0.7,
                )
                break  # Exit loop if successful
            except openai.error.RateLimitError as e:
                retry_after = extract_retry_after(str(e)) or exponential_backoff(retries)
                logger.warning(f"Rate limit exceeded. Retrying after {retry_after} seconds.")
                time.sleep(retry_after)
                retries += 1
            except Exception as e:
                error_message = f"⚠️ Error processing your query: {str(e)}"
                logger.error(error_message, exc_info=True)
                return error_message
        else:
            # Retries exhausted without a successful response
            return "⚠️ The service is currently rate limited. Please try again shortly."
        answer = response['choices'][0]['message']['content'].strip()
        logger.info("Chatbot response generated using Groq Cloud API")
        return answer
    except Exception as e:
        error_message = f"⚠️ Error processing your query: {str(e)}"
        logger.error(error_message, exc_info=True)
        return error_message
def build_app():
    """
    Build and launch the Gradio app.
    """
    try:
        logger.info("Building Gradio app")
        with gr.Blocks(css="app.css") as demo:
            # General overview
            gr.Markdown("""
# 📚 SmartMarks - AI Browser Bookmarks Manager

Welcome to **SmartMarks**, your intelligent assistant for managing browser bookmarks. SmartMarks leverages AI to help you organize, search, and interact with your bookmarks seamlessly.

---

## 🚀 **How to Use SmartMarks**

SmartMarks is divided into three main sections:

1. **📂 Upload and Process Bookmarks:** Import your existing bookmarks and let SmartMarks analyze and categorize them for you.
2. **💬 Chat with Bookmarks:** Interact with your bookmarks using natural language queries to find relevant links effortlessly.
3. **🛠️ Manage Bookmarks:** View, edit, delete, and export your bookmarks with ease.
""")
            # Upload and Process Bookmarks tab
            with gr.Tab("Upload and Process Bookmarks"):
                gr.Markdown("""
## 📂 **Upload and Process Bookmarks**

### 📝 **Steps:**
1. Click on the "Upload Bookmarks HTML File" button
2. Select your bookmarks file
3. Click "Process Bookmarks" to analyze and organize your bookmarks
""")
                upload = gr.File(label="📁 Upload Bookmarks HTML File", type='binary')
                process_button = gr.Button("⚙️ Process Bookmarks")
                output_text = gr.Textbox(label="✅ Output", interactive=False)
                bookmark_display = gr.HTML(label="📄 Processed Bookmarks")
            # Chat with Bookmarks tab
            with gr.Tab("Chat with Bookmarks"):
                gr.Markdown("""
## 💬 **Chat with Bookmarks**

Ask questions about your bookmarks and get relevant results.
""")
                user_input = gr.Textbox(
                    label="✍️ Ask about your bookmarks",
                    placeholder="e.g., Do I have any bookmarks about AI?"
                )
                chat_button = gr.Button("📨 Send")
                chat_output = gr.Textbox(label="💬 Response", interactive=False)
            # Manage Bookmarks tab
            with gr.Tab("Manage Bookmarks"):
                gr.Markdown("""
## 🛠️ **Manage Bookmarks**

Select bookmarks to delete or edit their categories.
""")
                manage_output = gr.Textbox(label="🔄 Status", interactive=False)
                bookmark_selector = gr.CheckboxGroup(
                    label="✅ Select Bookmarks",
                    choices=[]
                )
                new_category = gr.Dropdown(
                    label="🆕 New Category",
                    choices=CATEGORIES,
                    value="Uncategorized"
                )
                bookmark_display_manage = gr.HTML(label="📄 Bookmarks")
                with gr.Row():
                    delete_button = gr.Button("🗑️ Delete Selected")
                    edit_category_button = gr.Button("✏️ Edit Category")
                    export_button = gr.Button("💾 Export")
                download_link = gr.HTML(label="📥 Download")
            # Set up event handlers
            process_button.click(
                process_uploaded_file,
                inputs=upload,
                outputs=[output_text, bookmark_display, bookmark_selector, bookmark_display_manage]
            )
            chat_button.click(
                chatbot_response,
                inputs=user_input,
                outputs=chat_output
            )
            delete_button.click(
                delete_selected_bookmarks,
                inputs=bookmark_selector,
                outputs=[manage_output, bookmark_selector, bookmark_display_manage]
            )
            edit_category_button.click(
                edit_selected_bookmarks_category,
                inputs=[bookmark_selector, new_category],
                outputs=[manage_output, bookmark_selector, bookmark_display_manage]
            )
            export_button.click(
                export_bookmarks,
                outputs=download_link
            )
        logger.info("Launching Gradio app")
        demo.launch(debug=True)
    except Exception as e:
        logger.error(f"Error building the app: {e}", exc_info=True)
        print(f"Error building the app: {e}")
if __name__ == "__main__":
    # The semaphore limiting concurrent LLM API calls is created inside
    # process_bookmarks_llm so it binds to the event loop that uses it
    build_app()