import os
import json
import asyncio
import traceback
import base64
from datetime import datetime
import re
# Imports for Flask web server
from flask import Flask, request
# Imports from your existing script (assuming these are the necessary ones)
from huggingface_hub import InferenceClient, HfApi
import torch
import warnings
from sentence_transformers import SentenceTransformer, util, CrossEncoder
import gspread
from tqdm import tqdm
from ddgs import DDGS
import spacy
from dateutil.relativedelta import relativedelta
import dateparser
from dateparser.search import search_dates
import pytz
# Use os.environ.get for environment variables in a standard web app context
# from google.colab import userdata  # Not used in a typical Hugging Face Space deployment
# from google.colab.userdata import SecretNotFoundError  # Not used in a typical Hugging Face Space deployment
from datasets import Dataset, DatasetDict, Features, Value, concatenate_datasets, load_dataset
import faiss
import numpy as np
import pickle
from twilio.rest import Client  # Import Twilio Client
# Define the dataset name (replace with your actual Hugging Face username and desired dataset name)
dataset_name = "Futuresony/Logs_Conversation"
# Global variable to store the dataset
conversation_dataset = None
# Suppress warnings
warnings.filterwarnings("ignore", category=UserWarning)
# Define global variables and load secrets from environment variables for HF Spaces
# Use os.environ.get for standard environment variable access in web apps
HF_TOKEN = os.environ.get("HF_TOKEN")
print(f"HF_TOKEN loaded: {'Yes' if HF_TOKEN else 'No'}")
SHEET_ID = os.environ.get("SHEET_ID")  # Get SHEET_ID from environment variables
GOOGLE_BASE64_CREDENTIALS = os.environ.get("GOOGLE_BASE64_CREDENTIALS")
SECRET_API_KEY = os.environ.get("APP_API_KEY")
print(f"SECRET_API_KEY loaded: {'Yes' if SECRET_API_KEY else 'No'}")
if not SECRET_API_KEY:
    print("Warning: APP_API_KEY environment variable not set. API key validation will fail.")
elif not SECRET_API_KEY.startswith("fs_"):
    print("Warning: APP_API_KEY environment variable does not start with 'fs_'. Please check your secret.")
# Twilio credentials from environment variables
TWILIO_ACCOUNT_SID = os.environ.get("TWILIO_ACCOUNT_SID")
TWILIO_AUTH_TOKEN = os.environ.get("TWILIO_AUTH_TOKEN")
TWILIO_WHATSAPP_NUMBER = os.environ.get("TWILIO_WHATSAPP_NUMBER")  # Get Twilio WhatsApp number
# Check if Twilio credentials are loaded
if not TWILIO_ACCOUNT_SID:
    print("Warning: TWILIO_ACCOUNT_SID environment variable not set.")
if not TWILIO_AUTH_TOKEN:
    print("Warning: TWILIO_AUTH_TOKEN environment variable not set.")
if not TWILIO_WHATSAPP_NUMBER:
    print("Warning: TWILIO_WHATSAPP_NUMBER environment variable not set.")
elif not TWILIO_WHATSAPP_NUMBER.startswith('whatsapp:'):
    print(f"Warning: TWILIO_WHATSAPP_NUMBER '{TWILIO_WHATSAPP_NUMBER}' does not start with 'whatsapp:'. Ensure it's in the correct format (e.g., 'whatsapp:+14155238886').")
# Initialize Hugging Face Inference Clients
primary_client = None
fallback_client = None
try:
    primary_client = InferenceClient("meta-llama/Llama-3.3-70B-Instruct", token=HF_TOKEN)
    print("Primary model (LLaMA-3.3-70B-Instruct) client initialized.")
except Exception as e:
    print(f"Error initializing primary model client: {e}")
    print(traceback.format_exc())
try:
    fallback_client = InferenceClient("google/gemma-2-9b-it", token=HF_TOKEN)
    print("Fallback model (Gemma-2-9b-it) client initialized.")
except Exception as e:
    print(f"Error initializing fallback model client: {e}")
    print(traceback.format_exc())
# Load spaCy model for sentence splitting
nlp = None
try:
    nlp = spacy.load("en_core_web_sm")
    print("SpaCy model 'en_core_web_sm' loaded.")
except OSError:
    print("SpaCy model 'en_core_web_sm' not found. Downloading...")
    try:
        import subprocess
        import sys
        # Use sys.executable so the model is installed into the same interpreter/venv.
        subprocess.run([sys.executable, "-m", "spacy", "download", "en_core_web_sm"], check=True)
        nlp = spacy.load("en_core_web_sm")
        print("SpaCy model 'en_core_web_sm' downloaded and loaded.")
    except Exception as e:
        print(f"Failed to download or load SpaCy model: {e}")
# Load SentenceTransformer for RAG/business info retrieval and semantic detection
embedder = None
try:
    print("Attempting to load Sentence Transformer (sentence-transformers/paraphrase-MiniLM-L6-v2)...")
    embedder = SentenceTransformer("sentence-transformers/paraphrase-MiniLM-L6-v2")
    print("Sentence Transformer loaded.")
except Exception as e:
    print(f"Error loading Sentence Transformer: {e}")
# Load a Cross-Encoder model for re-ranking retrieved documents
reranker = None
try:
    print("Attempting to load Cross-Encoder Reranker (cross-encoder/ms-marco-MiniLM-L6-v2)...")
    reranker = CrossEncoder('cross-encoder/ms-marco-MiniLM-L6-v2')
    print("Cross-Encoder Reranker loaded.")
except Exception as e:
    print(f"Error loading Cross-Encoder Reranker: {e}")
    print("Please ensure the model identifier 'cross-encoder/ms-marco-MiniLM-L6-v2' is correct and accessible on Hugging Face Hub.")
    print(traceback.format_exc())
    reranker = None
# Google Sheets Authentication
gc = None
def authenticate_google_sheets():
    """Authenticates with Google Sheets using base64 encoded credentials."""
    global gc
    print("Authenticating Google Account...")
    if not GOOGLE_BASE64_CREDENTIALS:
        print("Error: GOOGLE_BASE64_CREDENTIALS environment variable not found.")
        return False
    try:
        credentials_json = base64.b64decode(GOOGLE_BASE64_CREDENTIALS).decode('utf-8')
        credentials = json.loads(credentials_json)
        gc = gspread.service_account_from_dict(credentials)
        print("Google Sheets authentication successful via service account.")
        return True
    except Exception as e:
        print(f"Google Sheets authentication failed: {e}")
        print(traceback.format_exc())
        print("Please ensure your GOOGLE_BASE64_CREDENTIALS environment variable is correctly set and contains valid service account credentials.")
        return False
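# Illustrative note: GOOGLE_BASE64_CREDENTIALS is assumed to be the service-account
# JSON key encoded as a single base64 line, e.g. produced with one of:
#   base64 -w0 service_account.json    # GNU coreutils (Linux)
#   base64 -i service_account.json     # macOS
# (the file name is a placeholder; use your own key file)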
# Google Sheets Data Loading and Embedding for RAG
data = []
descriptions_for_embedding = []
embeddings = torch.tensor([])  # This will store embeddings for RAG data
business_info_available = False
def load_business_info():
    """Loads business information from Google Sheet and creates embeddings."""
    global data, descriptions_for_embedding, embeddings, business_info_available
    business_info_available = False
    if gc is None:
        print("Skipping Google Sheet loading: Google Sheets client not authenticated.")
        return
    if not SHEET_ID:
        print("Error: SHEET_ID environment variable not set.")
        return
    try:
        sheet = gc.open_by_key(SHEET_ID).sheet1
        print(f"Successfully opened Google Sheet with ID: {SHEET_ID}")
        data_records = sheet.get_all_records()
        if not data_records:
            print(f"Warning: No data records found in Google Sheet with ID: {SHEET_ID}")
            data = []
            descriptions_for_embedding = []
        else:
            filtered_data = [row for row in data_records if row.get('Service') and row.get('Description')]
            if not filtered_data:
                print("Warning: Filtered data is empty after checking for 'Service' and 'Description'.")
                data = []
                descriptions_for_embedding = []
            else:
                data = filtered_data
                descriptions_for_embedding = [f"Service: {row['Service']}. Description: {row['Description']}" for row in data]
        if descriptions_for_embedding and embedder is not None:
            print("Encoding descriptions for RAG...")
            try:
                embeddings = embedder.encode(descriptions_for_embedding, convert_to_tensor=True)
                print("Encoding complete. RAG embeddings created.")
                business_info_available = True
            except Exception as e:
                print(f"Error during description encoding for RAG: {e}")
                embeddings = torch.tensor([])
                business_info_available = False
        else:
            print("Skipping encoding descriptions for RAG: No descriptions found or embedder not available.")
            embeddings = torch.tensor([])
            business_info_available = False
        print(f"Loaded {len(descriptions_for_embedding)} entries from Google Sheet for embedding/RAG.")
        if not business_info_available:
            print("Business information retrieval (RAG) is NOT available.")
        else:
            print("Business information retrieval (RAG) is available.")
    except gspread.exceptions.SpreadsheetNotFound:
        print(f"Error: Google Sheet with ID '{SHEET_ID}' not found.")
        print("Please check the SHEET_ID and ensure your authenticated Google Account has access to this sheet.")
        business_info_available = False
    except Exception as e:
        print(f"An error occurred while accessing the Google Sheet: {e}")
        print(traceback.format_exc())
        business_info_available = False
# Business Info Retrieval (RAG) function
def retrieve_business_info(query: str, top_n: int = 3) -> list:
    """
    Retrieves relevant business information from loaded data based on a query.
    """
    global data, embeddings
    if not business_info_available or embedder is None or not descriptions_for_embedding or not data or embeddings.numel() == 0:
        print("Business information retrieval is not available or RAG data is empty.")
        return []
    try:
        query_embedding = embedder.encode(query, convert_to_tensor=True)
        if query_embedding.device != embeddings.device:
            query_embedding = query_embedding.to(embeddings.device)
        cosine_scores = util.cos_sim(query_embedding, embeddings)[0]
        top_results_indices = torch.topk(cosine_scores, k=min(top_n, len(data)))[1].tolist()
        top_results = [data[i] for i in top_results_indices]
        if reranker is not None and top_results:
            print("Re-ranking top results...")
            rerank_pairs = [(query, descriptions_for_embedding[i]) for i in top_results_indices]
            rerank_scores = reranker.predict(rerank_pairs)
            reranked_indices = sorted(range(len(rerank_scores)), key=lambda i: rerank_scores[i], reverse=True)
            reranked_results = [top_results[i] for i in reranked_indices]
            print("Re-ranking complete.")
            return reranked_results
        else:
            return top_results
    except Exception as e:
        print(f"Error during business information retrieval: {e}")
        print(traceback.format_exc())
        return []
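# Minimal usage sketch for the bi-encoder retrieval + cross-encoder re-ranking
# above. Not called anywhere; the query is a hypothetical example and assumes
# load_business_info() has already populated `data` and `embeddings`:
def _example_retrieve_business_info():
    results = retrieve_business_info("Who handles loan applications?", top_n=3)
    for row in results:
        # Each row is a dict keyed by the sheet's column headers.
        print(row.get('Service'), '->', row.get('Description'))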
# Function to perform DuckDuckGo Search and return results with URLs
async def perform_duckduckgo_search(query: str, max_results: int = 5):
    """
    Performs a search using DuckDuckGo asynchronously.
    Returns a list of result dictionaries, or an error string on failure.
    """
    print(f"Executing Tool: perform_duckduckgo_search with query='{query}'")
    search_results_list = []
    try:
        await asyncio.sleep(1)
        with DDGS() as ddgs:
            search_query = query.strip()
            if not search_query or len(search_query.split()) < 2:
                print(f"Skipping search for short query: '{search_query}'")
                return []
            print(f"Sending search query to DuckDuckGo: '{search_query}'")
            loop = asyncio.get_event_loop()
            results_generator = await loop.run_in_executor(None, lambda: list(ddgs.text(search_query, max_results=max_results)))
            results_found = False
            for r in results_generator:
                search_results_list.append(r)
                results_found = True
            print(f"Raw results from DuckDuckGo: {search_results_list}")
            if not results_found and max_results > 0:
                print(f"DuckDuckGo search for '{search_query}' returned no results.")
            elif results_found:
                print(f"DuckDuckGo search for '{search_query}' completed. Found {len(search_results_list)} results.")
    except Exception as e:
        print(f"Error during DuckDuckGo search for '{search_query if 'search_query' in locals() else query}': {e}")
        print(traceback.format_exc())
        return f"An error occurred during web search: {e}"
    return search_results_list
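# Sketch of exercising the async search helper outside the web app, e.g. as a
# quick smoke test from a REPL. The query string is just an example, and the
# result keys are hedged since they depend on the installed ddgs version:
def _example_duckduckgo_search():
    results = asyncio.run(perform_duckduckgo_search("Tanzania central bank interest rate", max_results=3))
    if isinstance(results, str):  # the error path returns a message string
        print(results)
    else:
        for r in results:
            print(r.get('title'), r.get('href') or r.get('url'))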
# Semantic date/time detection and calculation function using dateparser
async def perform_date_calculation(query: str) -> str | None:
    """
    Analyzes query for date/time information using dateparser asynchronously.
    """
    print(f"Executing Tool: perform_date_calculation with query='{query}' using dateparser.search_dates")
    try:
        await asyncio.sleep(0.1)
        eafrica_tz = pytz.timezone('Africa/Dar_es_Salaam')
        now = datetime.now(eafrica_tz)
    except pytz.UnknownTimeZoneError:
        print("Error: Unknown timezone 'Africa/Dar_es_Salaam'. Using default system time.")
        now = datetime.now()
    try:
        loop = asyncio.get_event_loop()
        found = await loop.run_in_executor(None, lambda: search_dates(
            query,
            settings={
                "PREFER_DATES_FROM": "future",
                "RELATIVE_BASE": now
            },
            languages=['sw', 'en']
        ))
        if not found:
            print("dateparser.search_dates could not parse any date/time.")
            return None
        text_snippet, parsed = found[0]
        print(f"dateparser.search_dates found: text='{text_snippet}', parsed='{parsed}'")
        is_swahili = any(swahili_phrase in query.lower() for swahili_phrase in ['tarehe', 'siku', 'saa', 'muda', 'leo', 'kesho', 'jana', 'ngapi', 'gani', 'mwezi', 'mwaka', 'habari', 'mambo', 'shikamoo', 'karibu', 'asante'])
        if is_swahili:
            query_lower = query.lower().strip()
            if query_lower in ['habari', 'mambo', 'habari gani']:
                return "Nzuri! Habari zako?"
            elif query_lower in ['shikamoo']:
                return "Marahaba!"
            elif query_lower in ['asante']:
                return "Karibu!"
            elif query_lower in ['karibu']:
                return "Asante!"
        if parsed.tzinfo is not None and now.tzinfo is None:
            print("Warning: Parsed date has timezone, but current time does not. Cannot compare accurately.")
        if parsed.date() == now.date():
            if abs((parsed.replace(tzinfo=None) - now.replace(tzinfo=None)).total_seconds()) < 60 or parsed.time() == datetime.min.time():
                print("Query parsed to today's date and time is close to 'now' or midnight, returning current time/date.")
                if is_swahili:
                    return f"Kwa saa za Afrika Mashariki (Tanzania), tarehe ya leo ni {now.strftime('%A, %d %B %Y')} na saa ni {now.strftime('%H:%M:%S')}."
                else:
                    return f"In East Africa (Tanzania), the current date is {now.strftime('%A, %d %B %Y')} and the time is {now.strftime('%H:%M:%S')}."
            else:
                print(f"Query parsed to a specific time today: {parsed.strftime('%H:%M:%S')}")
                if is_swahili:
                    return f"Hiyo inafanyika leo, {parsed.strftime('%A, %d %B %Y')}, saa {parsed.strftime('%H:%M:%S')} saa za Afrika Mashariki."
                else:
                    return f"That falls on today, {parsed.strftime('%A, %d %B %Y')}, at {parsed.strftime('%H:%M:%S')} East Africa Time."
        else:
            print(f"Query parsed to a specific date: {parsed.strftime('%A, %d %B %Y')} at {parsed.strftime('%H:%M:%S')}")
            time_str = parsed.strftime('%H:%M:%S')
            date_str = parsed.strftime('%A, %d %B %Y')
            if parsed.tzinfo:
                tz_name = parsed.tzinfo.tzname(parsed) or 'UTC'
                if is_swahili:
                    return f"Hiyo inafanyika tarehe {date_str} saa {time_str} {tz_name}."
                else:
                    return f"That falls on {date_str} at {time_str} {tz_name}."
            else:
                if is_swahili:
                    return f"Hiyo inafanyika tarehe {date_str} saa {time_str}."
                else:
                    return f"That falls on {date_str} at {time_str}."
    except Exception as e:
        print(f"Error during dateparser.search_dates execution: {e}")
        print(traceback.format_exc())
        return f"An error occurred while parsing date/time: {e}"
# Function to determine if a query requires a tool or can be answered directly
def determine_tool_usage(query: str) -> tuple[str, str]:
    """
    Analyzes the query to determine if a specific tool is needed and its complexity.
    Returns a tuple: (tool_name, complexity_level)
    Complexity levels: 'simple' (fallback model), 'complex' (primary model)
    """
    query_lower = query.lower()
    swahili_conversational_phrases = ['habari', 'mambo', 'shikamoo', 'karibu', 'asante', 'habari gani']
    if any(swahili_phrase in query_lower for swahili_phrase in swahili_conversational_phrases):
        print(f"Detected a Swahili conversational phrase: '{query}'. Using 'date_calculation' tool and 'simple' complexity.")
        return "date_calculation", "simple"
    if business_info_available and primary_client:
        messages_business_check = [{"role": "user", "content": f"Does the following query ask about a specific person, service, offering, or description that is likely to be found *only* within a specific business's internal knowledge base, and not general knowledge? For example, questions about 'Salum' or 'Jackson Kisanga' are likely business-related, while questions about 'the current president of the USA' or 'who won the Ballon d'Or' are general knowledge. Answer only 'yes' or 'no'. Query: {query}"}]
        try:
            business_check_response = primary_client.chat_completion(
                messages=messages_business_check,
                max_tokens=10,
                temperature=0.1
            ).choices[0].message.content.strip().lower()
            if business_check_response == "yes":
                print(f"Detected as specific business info query based on LLM check: '{query}'. Using 'business_info_retrieval' tool and 'simple' complexity.")
                return "business_info_retrieval", "simple"
            else:
                print(f"LLM check indicates not a specific business info query: '{query}'")
        except Exception as e:
            print(f"Error during LLM call for business info check for query '{query}': {e}")
            print(traceback.format_exc())
            print(f"Proceeding without business info check for query '{query}' due to error.")
    date_time_keywords = ['date', 'time', 'when', 'what day', 'what time', 'leo', 'kesho', 'jana', 'muda', 'saa', 'tarehe', 'siku']
    if any(keyword in query_lower for keyword in date_time_keywords):
        print(f"Detected date/time keywords in query: '{query}'. Suggesting 'date_calculation' tool.")
        if primary_client:
            messages_complexity = [{"role": "user", "content": f"Is the following query simple or complex? A simple query is a basic question, a greeting, or a question that can be answered with a short, direct response. A complex query requires detailed understanding, multiple steps, or external information synthesis. Respond ONLY with 'simple' or 'complex'. Query: {query}"}]
            try:
                complexity_response = primary_client.chat_completion(
                    messages=messages_complexity,
                    max_tokens=10,
                    temperature=0.1
                ).choices[0].message.content.strip().lower()
                print(f"Determined complexity for date/time query '{query}': '{complexity_response}'")
                return "date_calculation", complexity_response
            except Exception as e:
                print(f"Error determining complexity for date/time query '{query}': {e}. Defaulting to 'simple'.")
                return "date_calculation", "simple"
        else:
            print("Primary client not available for complexity check. Defaulting date/time query to 'simple'.")
            return "date_calculation", "simple"
    if primary_client:
        messages_tool_determination_search = [{"role": "user", "content": f"Does the following query require searching the web for current or general knowledge information (e.g., news, facts, definitions, current events)? Respond ONLY with 'duckduckgo_search' or 'none'. Query: {query}"}]
        try:
            search_determination_response = primary_client.chat_completion(
                messages=messages_tool_determination_search,
                max_tokens=20,
                temperature=0.1,
                top_p=0.9
            ).choices[0].message.content or ""
            response_lower = search_determination_response.strip().lower()
            if "duckduckgo_search" in response_lower:
                print(f"Model-determined tool for '{query}': 'duckduckgo_search'. Using 'complex' complexity.")
                return "duckduckgo_search", "complex"
            else:
                print(f"Model-determined tool for '{query}': 'none' (for search).")
        except Exception as e:
            print(f"Error during LLM call for search tool determination for query '{query}': {e}")
            print(traceback.format_exc())
            print(f"Proceeding without search tool check for query '{query}' due to error.")
    if primary_client:
        messages_complexity = [{"role": "user", "content": f"Is the following query simple or complex? A simple query is a basic question, a greeting, or a question that can be answered with a short, direct response. A complex query requires detailed understanding, multiple steps, or external information synthesis. Respond ONLY with 'simple' or 'complex'. Query: {query}"}]
        try:
            complexity_response = primary_client.chat_completion(
                messages=messages_complexity,
                max_tokens=10,
                temperature=0.1
            ).choices[0].message.content.strip().lower()
            if "complex" in complexity_response:
                print(f"Determined query complexity for '{query}': 'complex'. Using 'none' tool.")
                return "none", "complex"
            else:
                print(f"Determined query complexity for '{query}': 'simple'. Using 'none' tool.")
                return "none", "simple"
        except Exception as e:
            print(f"Error during LLM call for complexity determination for query '{query}': {e}")
            print(traceback.format_exc())
            print(f"Defaulting query '{query}' to 'complex' due to error.")
            return "none", "complex"
    else:
        print("Primary client not available for complexity determination. Defaulting query to 'simple'.")
        return "none", "simple"  # Default to simple if primary client is not available
# Function to summarize chat history (using primary client if available)
def summarize_chat_history(chat_history: list[dict]) -> str:
    """
    Summarizes the provided chat history using the LLM.
    Uses the primary client for summarization if available.
    """
    print("\n--- Summarizing chat history ---")
    if not chat_history:
        print("Chat history is empty, no summarization needed.")
        return ""
    history_text = "\n".join([f"{msg['role']}: {msg['content']}" for msg in chat_history])
    prompt_for_summary = f"""
Please provide a concise summary of the following conversation history.
Conversation History:
{history_text}
Summary:
"""
    if primary_client:
        try:
            messages_summary = [{"role": "user", "content": prompt_for_summary}]
            summary_response = primary_client.chat_completion(
                messages=messages_summary,
                max_tokens=200,
                temperature=0.3,
                top_p=0.9
            ).choices[0].message.content or ""
            print("Chat history summarization successful using primary client.")
            return summary_response.strip()
        except Exception as e:
            print(f"Error during LLM call for chat history summarization (primary client): {e}")
            print(traceback.format_exc())
            return "Unable to summarize previous conversation."
    else:
        print("Primary client not available for summarization. Skipping summarization.")
        return "Previous conversation summary not available."  # Indicate summarization failed
# Set a window size for chat history to prevent exceeding context limits
CHAT_HISTORY_WINDOW_SIZE = 10  # Keep the last 10 messages (5 user, 5 assistant)
# Function to generate text using the LLM, incorporating tool results if available
def generate_text(prompt: str, tool_results: dict = None, chat_history: list[dict] = None, complexity_level: str = 'complex', determined_tools_and_complexity: dict = None) -> tuple[str, str]:
    """
    Generates text using the configured LLM (primary or fallback), optionally incorporating tool results and chat history.
    Implements conversation windowing for long histories.
    Includes determined_tools_and_complexity for specific tool result formatting.
    Returns a tuple of (response_text, model_used).
    """
    persona_instructions = """You are absa_ai, an AI developed on August 7, 2025, by the absa team. Your knowledge about business data comes from the company's internal Google Sheet.
You are a friendly and helpful chatbot. Respond to greetings appropriately (e.g., "Hello!", "Hi there!", "Habari!"). If the user uses Swahili greetings or simple conversational phrases, respond in Swahili. Otherwise, respond in English unless the query is clearly in Swahili. **Prioritize responding in Swahili if the user's query is in Swahili or contains Swahili phrases.** Handle conversational flow and ask follow-up questions when appropriate.
If the user asks a question about other companies or general knowledge, answer their question. However, subtly remind them that your primary expertise and purpose are related to Absa-specific information.
When using search results, integrate the information naturally into your response before listing the URLs as evidence or sources. Do not just list the search results verbatim.
"""
    messages = [{"role": "user", "content": persona_instructions}]
    if chat_history:
        recent_history = chat_history[-CHAT_HISTORY_WINDOW_SIZE:]
        print(f"Including last {len(recent_history)} messages in context.")
        messages.extend(recent_history)
    else:
        print("No chat history to include in context.")
    current_user_content = f"User Query: {prompt}\n\n"
    if tool_results and any(tool_results.values()):
        current_user_content += "Tool Results:\n"
        for question, results in tool_results.items():
            if results is not None and results != "none":
                current_user_content += f"--- Results for: {question} ---\n"
                if isinstance(results, list):
                    if not results:
                        current_user_content += "No results found.\n\n"
                    else:
                        is_search_result = determined_tools_and_complexity and question in determined_tools_and_complexity and determined_tools_and_complexity[question].get('tool') == 'duckduckgo_search'
                        if is_search_result:
                            current_user_content += "Search Results (for LLM to synthesize):\n"
                            for i, result in enumerate(results):
                                if isinstance(result, dict):
                                    url_info = f"\nURL: {result.get('url', 'N/A')}" if result.get('url') and result.get('url') != 'N/A' else ""
                                    current_user_content += f"Result {i+1}:\nTitle: {result.get('title', 'N/A')}\nSnippet: {result.get('body', 'N/A')}{url_info}\n\n"
                                else:
                                    current_user_content += f"Result {i+1}: {result}\n\n"
                        else:
                            for i, result in enumerate(results):
                                if isinstance(result, dict):
                                    current_user_content += f"Result {i+1}:\n{json.dumps(result, indent=2)}\n\n"
                                else:
                                    current_user_content += f"Result {i+1}: {result}\n\n"
                elif isinstance(results, dict):
                    if not results:
                        current_user_content += "No results found.\n\n"
                    else:
                        try:
                            current_user_content += f"{json.dumps(results, indent=2)}\n\n"
                        except TypeError:
                            current_user_content += f"{str(results)}\n\n"
                else:
                    current_user_content += f"{results}\n\n"
        current_user_content += "Based on the User Query and Tool Results, answer ONLY the user's latest query. Integrate information from tool results naturally into your response. If a question was answered by a tool, use the tool's result directly in your response. If a tool returned an error or no results, acknowledge that and try to answer based on your general knowledge or other tool results. Maintain the language of the original query if possible, especially for simple greetings or direct questions answered by tools. When using information from search results, present the key information first, then list the relevant URLs as 'Evidence Links:' or 'Sources:' at the end of your answer for that specific question. **If the user's query was primarily in Swahili, respond in Swahili.**"
        print("Added tool results and instruction to final prompt, with updated search result formatting instruction and Swahili preference.")
    else:
        current_user_content += "Based on the User Query, answer ONLY the user's latest query. **If the user's query was primarily in Swahili, respond in Swahili.**"
        print("No tool results to add to final prompt, relying on conversation history summary, with Swahili preference added.")
    messages.append({"role": "user", "content": current_user_content})
    generation_config = {
        "temperature": 0.7,
        "max_new_tokens": 500,
        "top_p": 0.95,
        "top_k": 50,
        "do_sample": True,
    }
    response = "An error occurred during text generation."
    model_used = "none"
    try:
        if complexity_level == 'complex' and primary_client:
            print("Using primary client for generation.")
            response = primary_client.chat_completion(
                messages=messages,
                max_tokens=generation_config.get("max_new_tokens", 512),
                temperature=generation_config.get("temperature", 0.7),
                top_p=generation_config.get("top_p", 0.95)
            ).choices[0].message.content or ""
            print("LLM generation successful using primary client.")
            model_used = "primary"
        elif fallback_client:  # Used for 'simple' queries, or when the primary client is unavailable
            print("Using fallback client for generation.")
            response = fallback_client.chat_completion(
                messages=messages,
                max_tokens=generation_config.get("max_new_tokens", 512),
                temperature=generation_config.get("temperature", 0.7),
                top_p=generation_config.get("top_p", 0.95)
            ).choices[0].message.content or ""
            print("LLM generation successful using fallback client.")
            model_used = "fallback"
        else:
            print("No LLM client available for generation.")
            response = "Sorry, I am currently unable to generate a response."
            model_used = "none"
        return response.strip(), model_used
    except Exception as e:
        print(f"Error during final LLM generation (primary or fallback): {e}")
        print(traceback.format_exc())
        return "An error occurred while generating the final response.", "error"
# Function to log conversation data to the Hugging Face Dataset and push
def log_conversation(user_query: str, model_response: str, tool_details: dict = None, user_id: str = None, model_used: str = None, device_id: str = None):
    """
    Logs conversation data (query, response, timestamp, optional details, device ID) to the Hugging Face Dataset
    and pushes the changes to the Hub. Includes which model was used and the device ID.
    """
    global conversation_dataset
    global dataset_name
    print("\n--- Attempting to log conversation to Hugging Face Dataset ---")
    if conversation_dataset is None:
        print("Warning: Hugging Face dataset not loaded or created. Skipping conversation logging.")
        return
    if not HF_TOKEN:
        print("Warning: HF_TOKEN not set. Cannot push dataset to Hub. Logging locally only.")
        # Continue logging to the dataset object in memory even without a token to push
    try:
        timestamp = datetime.now().isoformat()
        tool_details_json = json.dumps(tool_details) if tool_details is not None else None
        user_id_val = user_id if user_id is not None else "anonymous"
        device_id_val = device_id if device_id is not None else "unknown_device"
        model_used_val = model_used if model_used is not None else "unknown"
        new_log_entry = {
            'timestamp': timestamp,
            'user_id': user_id_val,
            'device_id': device_id_val,
            'user_query': user_query,
            'model_response': model_response,
            'tool_details': tool_details_json,
            'model_used': model_used_val
        }
        new_row_dataset = Dataset.from_dict({key: [value] for key, value in new_log_entry.items()})
        if 'train' in conversation_dataset:
            conversation_dataset['train'] = concatenate_datasets([conversation_dataset['train'], new_row_dataset])
        else:
            # Dataset.cast expects a datasets.Features object, not a plain dict of type names.
            log_features = Features({
                'timestamp': Value('string'),
                'user_id': Value('string'),
                'device_id': Value('string'),
                'user_query': Value('string'),
                'model_response': Value('string'),
                'tool_details': Value('string'),
                'model_used': Value('string')
            })
            conversation_dataset = DatasetDict({'train': new_row_dataset.cast(log_features)})
        print("Conversation data successfully added to the dataset object.")
        if HF_TOKEN:
            print(f"Attempting to push dataset to {dataset_name}...")
            conversation_dataset.push_to_hub(dataset_name, token=HF_TOKEN, commit_message=f"Add conversation log: {timestamp}")
            print(f"Successfully pushed dataset to {dataset_name}.")
        else:
            print("Skipping push to Hugging Face Hub: HF_TOKEN not set.")
    except Exception as e:
        print(f"An unexpected error occurred during Hugging Face Dataset logging and pushing: {e}")
        print(traceback.format_exc())
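# For reference, a logged row looks roughly like this (all values illustrative):
#   {"timestamp": "2025-08-07T12:00:00", "user_id": "anonymous",
#    "device_id": "whatsapp:+255700000000", "user_query": "Habari",
#    "model_response": "Nzuri! Habari zako?", "tool_details": "{...}",
#    "model_used": "fallback"}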
# Caching Implementation
FAISS_INDEX_FILE = "cache.index"
CACHE_METADATA_FILE = "cache_metadata.pkl"
faiss_index = None
cache_metadata = {}
EMBEDDING_DIM = 384
CACHE_SIMILARITY_THRESHOLD = 0.9
CACHE_EXPIRATION_DAYS = 7
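# Note on the threshold: check_cache below compares a FAISS IndexFlatL2 distance
# against (1 - CACHE_SIMILARITY_THRESHOLD). IndexFlatL2 returns *squared* L2
# distance, and for unit-normalized embeddings ||a - b||^2 = 2 * (1 - cos(a, b)),
# so a cosine threshold of 0.9 would correspond to a squared distance of 0.2.
# The embeddings here are not explicitly normalized, so treat the 0.1 cutoff as
# an empirical heuristic rather than an exact cosine-similarity bound.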
def initialize_cache():
    """Initializes or loads the FAISS index and cache metadata."""
    global faiss_index, cache_metadata
    print("\n--- Initializing Cache ---")
    if embedder is None:
        print("Warning: Embedder not available. Cache will not be functional.")
        return
    if os.path.exists(FAISS_INDEX_FILE) and os.path.exists(CACHE_METADATA_FILE):
        print("Loading existing cache...")
        try:
            faiss_index = faiss.read_index(FAISS_INDEX_FILE)
            with open(CACHE_METADATA_FILE, 'rb') as f:
                cache_metadata = pickle.load(f)
            print(f"Cache loaded successfully. Current cache size: {faiss_index.ntotal}")
            cleanup_expired_cache_entries()
        except Exception as e:
            print(f"Error loading cache files: {e}. Initializing new cache.")
            print(traceback.format_exc())
            faiss_index = faiss.IndexFlatL2(EMBEDDING_DIM)
            cache_metadata = {}
            save_cache()
    else:
        print("No existing cache found. Initializing new cache.")
        faiss_index = faiss.IndexFlatL2(EMBEDDING_DIM)
        cache_metadata = {}
        save_cache()
def save_cache():
    """Saves the FAISS index and cache metadata to files."""
    global faiss_index, cache_metadata
    if faiss_index is None:
        print("Warning: FAISS index not initialized. Cannot save cache.")
        return
    print("Saving cache...")
    try:
        faiss.write_index(faiss_index, FAISS_INDEX_FILE)
        with open(CACHE_METADATA_FILE, 'wb') as f:
            pickle.dump(cache_metadata, f)
        print("Cache saved successfully.")
    except Exception as e:
        print(f"Error saving cache files: {e}")
        print(traceback.format_exc())
def get_query_embedding(query: str):
    """Generates an embedding for the given query."""
    if embedder is None:
        print("Warning: Embedder not available. Cannot generate query embedding for caching.")
        return None
    try:
        return embedder.encode(query, convert_to_tensor=False)
    except Exception as e:
        print(f"Error generating embedding for query '{query}': {e}")
        print(traceback.format_exc())
        return None
def add_to_cache(query: str, response: str, model_used: str = None):
    """Adds the query, response, timestamp, and model used to the cache metadata."""
    global faiss_index, cache_metadata
    if embedder is None or faiss_index is None:
        print("Warning: Embedder or FAISS index not available. Cannot add query to cache.")
        return
    try:
        query_embedding = get_query_embedding(query)
        if query_embedding is None:
            return
        faiss_index.add(np.array([query_embedding]))
        cache_id = faiss_index.ntotal - 1
        now = datetime.now()
        cache_metadata[cache_id] = {
            'query': query,
            'response': response,
            'timestamp': now,
            'count': 1,
            'model_used': model_used if model_used is not None else "unknown"
        }
        print(f"Added query and response to cache with ID {cache_id}. Model Used: {model_used}")
        save_cache()
        print(f"Current cache size: {faiss_index.ntotal}")
    except Exception as e:
        print(f"Error adding query to cache: {e}")
        print(traceback.format_exc())
def check_cache(query: str):
    """Checks the cache for a similar query and returns the cached response if found and not expired."""
    global faiss_index, cache_metadata
    if faiss_index is None or embedder is None or faiss_index.ntotal == 0:
        print("Cache is empty or not available. Skipping cache check.")
        return None
    try:
        query_embedding = get_query_embedding(query)
        if query_embedding is None:
            return None
        D, I = faiss_index.search(np.array([query_embedding]), 1)
        if I[0][0] != -1 and D[0][0] <= (1 - CACHE_SIMILARITY_THRESHOLD):
            cached_id = I[0][0]
            print(f"Found potential cache hit with ID {cached_id} and distance {D[0][0]:.4f}.")
            if cached_id in cache_metadata:
                cached_data = cache_metadata[cached_id]
                now = datetime.now()
                if (now - cached_data['timestamp']).days <= CACHE_EXPIRATION_DAYS:
                    print(f"Cache hit! Returning cached response for query: '{query}'")
                    cache_metadata[cached_id]['timestamp'] = now
                    cache_metadata[cached_id]['count'] += 1
                    try:
                        log_conversation(
                            user_query=query,
                            model_response=cached_data['response'],
                            tool_details={"cache_status": "hit"},
                            user_id="anonymous",
                            model_used=cached_data.get('model_used', 'cached_unknown'),
                            device_id="unknown_device_cache_hit"  # Placeholder for cache hits
                        )
                    except Exception as e:
                        print(f"Error during logging of cached response: {e}")
                        print(traceback.format_exc())
                    save_cache()
                    return cached_data['response']
                else:
                    print(f"Cache entry with ID {cached_id} found but expired.")
            else:
                print(f"Cache ID {cached_id} found in index but not in metadata. Cache inconsistency.")
        print(f"No suitable cache entry found for query: '{query}'")
        return None
    except Exception as e:
        print(f"Error during cache check: {e}")
        print(traceback.format_exc())
        return None
def cleanup_expired_cache_entries():
    """Removes expired entries from the cache and rebuilds the FAISS index if necessary."""
    global faiss_index, cache_metadata
    if faiss_index is None or faiss_index.ntotal == 0:
        print("Cache is empty or not initialized. No expired entries to clean.")
        return
    print("Cleaning up expired cache entries...")
    now = datetime.now()
    expired_ids = [
        cache_id for cache_id, cached_data in cache_metadata.items()
        if (now - cached_data['timestamp']).days > CACHE_EXPIRATION_DAYS
    ]
    if expired_ids:
        print(f"Found {len(expired_ids)} expired cache entries.")
        for cache_id in expired_ids:
            del cache_metadata[cache_id]
        if cache_metadata:
            print("Rebuilding FAISS index with non-expired entries...")
            try:
                non_expired_embeddings = []
                # Re-key metadata so entry IDs match the positions of the vectors
                # in the rebuilt index (the old IDs are no longer valid offsets).
                rebuilt_metadata = {}
                for cache_id, cached_data in sorted(cache_metadata.items()):
                    original_query = cached_data.get('query')
                    if original_query and embedder:
                        try:
                            embedding = embedder.encode(original_query, convert_to_tensor=False)
                            rebuilt_metadata[len(non_expired_embeddings)] = cached_data
                            non_expired_embeddings.append(embedding.tolist())
                        except Exception as e:
                            print(f"Error re-embedding query '{original_query}': {e}. Skipping.")
                if non_expired_embeddings:
                    print(f"Re-embedding {len(non_expired_embeddings)} non-expired queries.")
                    faiss_index = faiss.IndexFlatL2(EMBEDDING_DIM)
                    faiss_index.add(np.array(non_expired_embeddings, dtype=np.float32))
                    cache_metadata = rebuilt_metadata
                    print(f"FAISS index rebuilt. New size: {faiss_index.ntotal}")
                else:
                    print("No non-expired entries to rebuild FAISS index. Clearing index.")
                    faiss_index = faiss.IndexFlatL2(EMBEDDING_DIM)
                    cache_metadata = {}
            except Exception as e:
                print(f"Error rebuilding FAISS index: {e}")
                print(traceback.format_exc())
                print("Clearing cache due to rebuild error.")
                faiss_index = faiss.IndexFlatL2(EMBEDDING_DIM)
                cache_metadata = {}
        else:
            print("All cache entries expired. Clearing FAISS index and metadata.")
            faiss_index = faiss.IndexFlatL2(EMBEDDING_DIM)
            cache_metadata = {}
        save_cache()
    else:
        print("No expired cache entries found.")
# Main chat function with query breakdown and tool execution
async def chat(query: str, chat_history: list[list], api_key: str, device_id: str = None):
    """
    Processes user queries by breaking down multi-part queries, determining and
    executing appropriate tools for each question asynchronously, and synthesizing results
    using the LLM. Incorporates caching for repeated questions and routes
    to primary or fallback model based on complexity. Accepts device_id for logging.
    """
    internal_chat_history = []
    for entry in chat_history:
        if isinstance(entry, list) and len(entry) == 2:
            user_msg, assistant_msg = entry
            if user_msg is not None:
                internal_chat_history.append({"role": "user", "content": str(user_msg)})
            if assistant_msg is not None:
                internal_chat_history.append({"role": "assistant", "content": str(assistant_msg)})
    print(f"\n--- chat function received new query ---")
    print(f"  query: {query}")
    print(f"  device_id: {device_id}")
    print(f"  Validating against SECRET_API_KEY: {'Yes' if SECRET_API_KEY else 'No'}")
    print(f"  chat_history (converted): {internal_chat_history}")
    print(f"  api_key from UI received: {'Yes' if api_key else 'No'}")
    if not SECRET_API_KEY:
        print("Error: APP_API_KEY environment variable not set.")
        error_response = "API key validation failed: Application not configured correctly. APP_API_KEY secret is missing."
        log_conversation(
            user_query=query,
            model_response=error_response,
            tool_details={"validation_status": "failed", "reason": "secret_not_set"},
            user_id="unknown",
            device_id=device_id
        )
        return error_response
    if api_key != SECRET_API_KEY:
        print("Error: API key from UI does not match SECRET_API_KEY.")
        error_response = "API key validation failed: Invalid API key provided."
        log_conversation(
            user_query=query,
            model_response=error_response,
            tool_details={"validation_status": "failed", "reason": "invalid_api_key"},
            user_id="unknown",
            device_id=device_id
        )
        return error_response
    cached_response = check_cache(query)
    if cached_response:
        print(f"Returning cached response for query: '{query}'")
        return cached_response
print("\n--- Breaking down query ---") | |
if primary_client: | |
prompt_for_question_breakdown = f""" | |
Analyze the following query and list each distinct question found within it. | |
Present each question on a new line, starting with a hyphen. | |
Query: {query} | |
""" | |
try: | |
messages_question_breakdown = primary_client.chat_completion( | |
messages=[{"role": "user", "content": prompt_for_question_breakdown}], | |
max_tokens=100, | |
temperature=0.1, | |
top_p=0.9 | |
).choices[0].message.content or "" | |
individual_questions = [line.strip() for line in messages_question_breakdown.split('\n') if line.strip()] | |
cleaned_questions = [re.sub(r'^[-*]?\s*', '', q) for q in individual_questions if not q.strip().lower().startswith('note:')] | |
print("Individual questions identified:") | |
for q in cleaned_questions: | |
print(f"- {q}") | |
except Exception as e: | |
print(f"Error during LLM call for question breakdown (primary client): {e}") | |
print(traceback.format_exc()) | |
print(f"Proceeding with original query as a single question due to breakdown error.") | |
cleaned_questions = [query] | |
else: | |
print("Primary client not available for question breakdown. Proceeding with original query as a single question.") | |
cleaned_questions = [query] | |
print("\n--- Determining tools and complexity per question ---") | |
determined_tools_and_complexity = {} | |
for question in cleaned_questions: | |
print(f"\nAnalyzing question for tool determination and complexity: '{question}'") | |
tool, complexity = determine_tool_usage(question) | |
determined_tools_and_complexity[question] = {"tool": tool, "complexity": complexity} | |
print(f"Determined tool and complexity for '{question}': Tool='{tool}', Complexity='{complexity}'") | |
print("\nSummary of determined tools and complexity per question:") | |
for question, details in determined_tools_and_complexity.items(): | |
print(f"'{question}': Tool='{details['tool']}', Complexity='{details['complexity']}'") | |
print("\n--- Executing tools asynchronously and collecting results ---") | |
tool_results = {} | |
tasks = [] | |
questions_to_process = [] | |
for question, details in determined_tools_and_complexity.items(): | |
tool = details['tool'] | |
print(f"\nQueueing tool '{tool}' for question: '{question}')") | |
questions_to_process.append(question) | |
if tool == "date_calculation": | |
tasks.append(perform_date_calculation(question)) | |
elif tool == "duckduckgo_search": | |
tasks.append(perform_duckduckgo_search(question)) | |
elif tool == "business_info_retrieval": | |
loop = asyncio.get_event_loop() | |
tasks.append(loop.run_in_executor(None, retrieve_business_info, question)) | |
elif tool == "none": | |
print(f"Skipping tool execution for question: '{question}' as tool is 'none'. LLM will handle.") | |
tasks.append(asyncio.Future()) | |
tasks[-1].set_result("none") | |
try: | |
results = await asyncio.gather(*tasks, return_exceptions=True) | |
print("\n--- Asynchronous Tool Execution Results ---") | |
for i, question in enumerate(questions_to_process): | |
result = results[i] | |
if isinstance(result, Exception): | |
print(f"Error executing tool for question '{question}': {result}") | |
tool_results[question] = f"An error occurred while fetching information for this part of your query: {result}" | |
else: | |
print(f"Result for question '{question}': {result}") | |
tool_results[question] = result | |
print("\n-----------------------------------------") | |
except Exception as e: | |
print(f"An error occurred during asynchronous tool execution: {e}") | |
print(traceback.format_exc()) | |
for question in questions_to_process: | |
tool_results[question] = f"An error occurred while fetching information for this part of your query: {e}" | |
print("\n--- Collected Tool Results ---") | |
if tool_results: | |
for question, result in tool_results.items(): | |
print(f"\nQuestion: {question}") | |
print(f"Result: {result}") | |
else: | |
print("No tool results were collected.") | |
print("\n--------------------------") | |
print("\n--- Generating final response ---") | |
overall_complexity = 'simple' | |
for details in determined_tools_and_complexity.values(): | |
if details['complexity'] == 'complex': | |
overall_complexity = 'complex' | |
break | |
print(f"Overall query complexity determined as: '{overall_complexity}'") | |
final_response, model_used = generate_text( | |
query, | |
tool_results, | |
internal_chat_history, | |
complexity_level=overall_complexity, | |
determined_tools_and_complexity=determined_tools_and_complexity | |
) | |
print("\n--- Final Response from LLM ---") | |
print(final_response) | |
print(f"Model Used for Generation: {model_used}") | |
print("\n----------------------------") | |
add_to_cache(query, final_response, model_used=model_used) | |
try: | |
user_id_to_log = "anonymous" | |
if internal_chat_history: | |
for turn in internal_chat_history: | |
if turn.get("role") == "user" and "user_id:" in turn.get("content", "").lower(): | |
match = re.search(r"user_id:\s*(\S+)", turn.get("content", ""), re.IGNORECASE) | |
if match: | |
user_id_to_log = match.group(1) | |
break | |
logged_tool_details = {} | |
for question, details in determined_tools_and_complexity.items(): | |
logged_tool_details[question] = { | |
"tool_used": details['tool'], | |
"complexity": details['complexity'], | |
"raw_output": tool_results.get(question) | |
} | |
logged_tool_details["cache_status"] = "miss" | |
log_conversation( | |
user_query=query, | |
model_response=final_response, | |
tool_details=logged_tool_details, | |
user_id=user_id_to_log, | |
model_used=model_used, | |
device_id=device_id | |
) | |
except Exception as e: | |
print(f"Error during conversation logging after response generation: {e}") | |
print(traceback.format_exc()) | |
return final_response | |
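# Minimal local smoke-test sketch for the pipeline above. Not invoked by the app;
# the query and device ID are placeholders, and APP_API_KEY must be set for the
# validation step inside chat() to pass:
def _example_chat_roundtrip():
    reply = asyncio.run(chat(
        query="What time is it in Dar es Salaam?",
        chat_history=[],
        api_key=SECRET_API_KEY,
        device_id="local-test"
    ))
    print(reply)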
# Initialize Flask app
app = Flask(__name__)
# Initialize Twilio client globally
twilio_client = None
if TWILIO_ACCOUNT_SID and TWILIO_AUTH_TOKEN:
    try:
        twilio_client = Client(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)
        print("Twilio client initialized globally for webhook.")
    except Exception as e:
        print(f"Error initializing global Twilio client: {e}")
        print(traceback.format_exc())
        twilio_client = None  # Ensure it's None on failure
else:
    print("Twilio credentials missing. Global Twilio client not initialized. Cannot send messages.")
# Define the root route for checking if the app is running
@app.route('/')
def index():
    """Basic route to indicate the Flask app is running."""
    return "Flask application is running. Webhook is at /webhook", 200
# Define the Twilio webhook route (async views require Flask installed as flask[async])
@app.route('/webhook', methods=['POST'])
async def twilio_webhook():
    """
    Handles incoming POST requests from Twilio containing WhatsApp message data.
    Processes the message using the chat function (which includes logging, caching,
    and tool use) and sends the response back via Twilio.
    """
    # Access the incoming request data
    incoming_data = request.form
    print("Received incoming message data from Twilio:")
    print(incoming_data)
    # Access the incoming message body
    message_body = incoming_data.get('Body')
    # Access the sender's WhatsApp number
    sender_number = incoming_data.get('From')  # This will be the device_id for logging
    # Print the extracted message body and sender number
    print(f"Message Body: {message_body}")
    print(f"Sender Number (Device ID): {sender_number}")
    # Acknowledge callbacks without a message body (e.g., status updates) without processing
    if not message_body:
        print("No message body in request; acknowledging without processing.")
        return '', 200
    # --- Process the message using the chat function ---
    response_text = "An error occurred while processing your message."  # Default error response
    try:
        # Call the chat function with the message_body as query, empty history,
        # the loaded SECRET_API_KEY, and the sender_number as device_id.
        # Pass an empty list for chat_history, as each WhatsApp message is a new turn for the LLM context.
        # The chat function is async, so it must be awaited.
        response_text = await chat(
            query=message_body,
            chat_history=[],  # Empty history for a new WhatsApp message turn
            api_key=SECRET_API_KEY,  # Use the loaded API key (assumed global)
            device_id=sender_number  # Use sender_number as device_id
        )
        print(f"Generated response: {response_text}")
        # --- Send the generated response back via Twilio ---
        # Use the globally available twilio_client and TWILIO_WHATSAPP_NUMBER
        if twilio_client and TWILIO_WHATSAPP_NUMBER and sender_number:
            try:
                message = twilio_client.messages.create(
                    body=response_text,
                    from_=TWILIO_WHATSAPP_NUMBER,
                    to=sender_number
                )
                print(f"Message sent successfully to {sender_number}. SID: {message.sid}")
            except Exception as e:
                print(f"Error sending message via Twilio to {sender_number}: {e}")
                print(traceback.format_exc())
                # Continue and return 200 even if sending fails
        elif not twilio_client:
            print("Twilio client not initialized. Cannot send message.")
        elif not TWILIO_WHATSAPP_NUMBER:
            print("TWILIO_WHATSAPP_NUMBER not set. Cannot send message.")
        elif not sender_number:
            print("Sender number not available. Cannot send message.")
        # Return an empty response or a simple acknowledgement to Twilio.
        # Twilio expects a 200 OK response quickly.
        return '', 200
    except Exception as e:
        print(f"Error processing message with chat function: {e}")
        print(traceback.format_exc())
        # If processing with chat fails, attempt to send an error message back via Twilio
        error_response_to_user = "Sorry, I encountered an internal error while processing your request."
        if twilio_client and TWILIO_WHATSAPP_NUMBER and sender_number:
            try:
                twilio_client.messages.create(
                    body=error_response_to_user,
                    from_=TWILIO_WHATSAPP_NUMBER,
                    to=sender_number
                )
                print(f"Sent error message to {sender_number}.")
            except Exception as send_e:
                print(f"Error sending error message via Twilio to {sender_number}: {send_e}")
                print(traceback.format_exc())
        # Log the overall processing failure (optional)
        # log_conversation(
        #     user_query=message_body,
        #     model_response=error_response_to_user,
        #     tool_details={"processing_status": "failed", "error": str(e)},
        #     user_id="unknown",
        #     device_id=sender_number,
        #     model_used="error"
        # )
        return '', 500  # Return an error status if processing fails
# --- Initialization block (similar to if __name__ == "__main__": in the original script) ---
# This part runs when the script starts, e.g., on Hugging Face Spaces startup.
print("\n--- Application Startup ---")
# Load/Create Hugging Face Dataset on startup
try:
    print(f"Attempting to load dataset from {dataset_name} on startup...")
    conversation_dataset = load_dataset(dataset_name, token=HF_TOKEN)
    print(f"Successfully loaded existing dataset from {dataset_name} on startup.")
    print(conversation_dataset)
except Exception as e:
    print(f"Dataset not found or failed to load from {dataset_name} on startup: {e}")
    print("Creating a new dataset object on startup...")
    log_schema = {
        'timestamp': 'string',
        'user_id': 'string',
        'device_id': 'string',
        'user_query': 'string',
        'model_response': 'string',
        'tool_details': 'string',
        'model_used': 'string'
    }
    empty_data = {col: [] for col in log_schema.keys()}
    new_dataset = Dataset.from_dict(empty_data)
    conversation_dataset = DatasetDict({'train': new_dataset})
    print(f"Created a new empty dataset object with schema: {log_schema}")
    print(conversation_dataset)
authenticate_google_sheets()
load_business_info()
if nlp is None:
    print("Warning: SpaCy model not loaded. Sentence splitting may not work correctly.")
if embedder is None:
    print("Warning: Sentence Transformer (embedder) not loaded. RAG and Caching will not be available.")
if reranker is None:
    print("Warning: Cross-Encoder Reranker not loaded. Re-ranking of RAG results will not be performed.")
if not business_info_available:
    print("Warning: Business information (Google Sheet data) not loaded successfully. "
          "RAG will not be available. Please ensure the GOOGLE_BASE64_CREDENTIALS secret is set correctly.")
# Initialize the cache
initialize_cache()
# Add this block to explicitly run the Flask app when the script is executed directly
if __name__ == '__main__':
    # Ensure the asyncio event loop policy is compatible on Windows
    import platform
    if platform.system() == 'Windows':
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
    # Use port 7860, which is the default for Gradio and commonly used in Spaces.
    # debug=True is useful for development, but should be set to False for production.
    print("\n--- Running Flask app ---")
    # The Hugging Face Spaces environment typically sets the PORT environment variable;
    # use the provided port if available, otherwise default.
    port = int(os.environ.get('PORT', 7860))
    app.run(debug=True, host='0.0.0.0', port=port)
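# Sketch for testing the webhook locally once the app is running. The form
# fields mirror what Twilio posts; the phone number is a placeholder:
#   curl -X POST http://localhost:7860/webhook \
#        -d "Body=Habari" \
#        -d "From=whatsapp:+255700000000"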