Spaces: Runtime error
import os
import logging

import pandas as pd
import streamlit as st
from transformers import pipeline

# Configure logging
logging.basicConfig(level=logging.DEBUG)

# Load Hugging Face model
# Note: mistralai/Mistral-Large-Instruct-2407 is a very large, gated model; pulling it
# into a plain text-generation pipeline needs substantial GPU memory and is a likely
# cause of the runtime error on default Spaces hardware.
MODEL_NAME = "mistralai/Mistral-Large-Instruct-2407"
llm_pipeline = pipeline("text-generation", model=MODEL_NAME)
# Load datasets
def load_datasets():
    """Load datasets from CSV files."""
    try:
        electronics_df = pd.read_csv('electronics.csv')
        fashion_df = pd.read_csv('fashion.csv')
        return electronics_df, fashion_df
    except Exception as e:
        logging.error(f"Error loading datasets: {e}")
        st.error("Error loading datasets.")
        st.stop()

electronics_df, fashion_df = load_datasets()
# Keywords for routing queries
electronics_keywords = [
    'electronics', 'device', 'gadget', 'battery', 'performance',
    'phone', 'mobile', 'laptop', 'tv', 'bluetooth', 'speakers',
    'washing machine', 'headphones', 'camera', 'tablet', 'charger',
    'smartwatch', 'refrigerator'
]

fashion_keywords = [
    'fashion', 'clothing', 'size', 'fit', 'material', 'shirt',
    'pants', 'coats', 'shoes', 'girls dress', 'sarees', 'skirts',
    'jackets', 'sweaters', 'suits', 'accessories', 't-shirts'
]
# LLM-based function
def query_llm(prompt):
    """Query the LLM for responses."""
    try:
        # max_new_tokens bounds only the generated text; return_full_text=False
        # keeps the prompt itself out of the reply
        responses = llm_pipeline(
            prompt,
            max_new_tokens=150,
            num_return_sequences=1,
            return_full_text=False,
        )
        return responses[0]['generated_text'].strip()
    except Exception as e:
        logging.error(f"Error querying the LLM: {e}")
        return "Sorry, I'm having trouble processing your request right now."
def determine_category(query):
    """Determine the category based on the query."""
    query_lower = query.lower()
    if any(keyword in query_lower for keyword in electronics_keywords):
        logging.debug(f"Query '{query}' categorized as 'electronics'.")
        return 'electronics'
    elif any(keyword in query_lower for keyword in fashion_keywords):
        logging.debug(f"Query '{query}' categorized as 'fashion'.")
        return 'fashion'
    else:
        logging.debug(f"Query '{query}' categorized as 'general'.")
        return 'general'
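get_response() below calls electronics_response() and fashion_response(), but neither is defined anywhere in the file, so any categorized query raises a NameError. A minimal sketch of the two handlers follows, assuming the intent is to ground the LLM with a few matching rows from the loaded CSVs; the 'product_name' column used for matching is an assumption and should be changed to whatever the real datasets contain.

# Category handlers (sketch): these were not defined in the original file.
# They assume the CSVs expose a text column named 'product_name'; adjust as needed.
def _dataset_context(df, query, column='product_name', max_rows=3):
    """Return up to max_rows rows whose `column` value appears in the query text."""
    if column not in df.columns:
        return ""
    query_lower = query.lower()
    mask = df[column].astype(str).str.lower().apply(lambda name: name in query_lower)
    matches = df[mask].head(max_rows)
    return matches.to_string(index=False) if not matches.empty else ""

def electronics_response(user_input):
    """Answer electronics queries, grounding the LLM with matching catalog rows."""
    context = _dataset_context(electronics_df, user_input)
    prompt = f"Customer question about electronics: {user_input}\n"
    if context:
        prompt += f"Relevant products:\n{context}\n"
    prompt += "Answer:"
    return query_llm(prompt)

def fashion_response(user_input):
    """Answer fashion queries, grounding the LLM with matching catalog rows."""
    context = _dataset_context(fashion_df, user_input)
    prompt = f"Customer question about fashion: {user_input}\n"
    if context:
        prompt += f"Relevant products:\n{context}\n"
    prompt += "Answer:"
    return query_llm(prompt)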
# Fetch response based on query
def get_response(user_input):
    """Determine the category and fetch the appropriate response."""
    # Match greetings as whole words so that e.g. "shirt" (which contains "hi") is not greeted
    words = user_input.lower().split()
    if 'hi' in words or 'hello' in words:
        return "Hi, welcome to the customer support chatbot. How can I help you?"

    category = determine_category(user_input)
    if category == 'electronics':
        response = electronics_response(user_input)
    elif category == 'fashion':
        response = fashion_response(user_input)
    else:
        # Use LLM for more complex queries
        response = query_llm(user_input)
    return response
# Streamlit interface
def main():
    st.title("Customer Support Chatbot")

    if 'chat_history' not in st.session_state:
        st.session_state.chat_history = []

    user_input = st.text_input("Type your message here:")

    if st.button("Send"):
        if user_input:
            response_message = get_response(user_input)
            st.session_state.chat_history.append({"role": "user", "content": user_input})
            st.session_state.chat_history.append({"role": "assistant", "content": response_message})

    # Render the full conversation on every rerun
    for message in st.session_state.chat_history:
        st.markdown(f"{message['role'].capitalize()}: {message['content']}")

if __name__ == "__main__":
    main()
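To try the app locally (assuming the script is saved as app.py and electronics.csv / fashion.csv sit alongside it):

streamlit run app.py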