Spaces:
Runtime error
Runtime error
File size: 3,531 Bytes
a927596 2a55698 a927596 2a55698 1026055 a927596 2a55698 a927596 2a55698 a927596 2a55698 a927596 3f13569 a927596 3f13569 a927596 3f13569 a927596 2a55698 a927596 2a55698 a927596 2a55698 a927596 2a55698 a927596 2a55698 a927596 2a55698 a927596 e587405 a927596 e587405 a927596 e587405 2a55698 a927596 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 |
import os
import pandas as pd
import streamlit as st
import logging
from transformers import pipeline
# Configure logging
# DEBUG is very verbose; category-routing decisions below are logged at this level.
logging.basicConfig(level=logging.DEBUG)
# Load Hugging Face model
# NOTE(review): Mistral-Large-Instruct-2407 is a very large model; instantiating
# the pipeline at import time will download/load full weights on every startup
# and requires substantial GPU memory — confirm this is intended for the target host.
MODEL_NAME = "mistralai/Mistral-Large-Instruct-2407" # Hugging Face Model name
llm_pipeline = pipeline("text-generation", model=MODEL_NAME)
# Load datasets
def load_datasets(electronics_path='electronics.csv', fashion_path='fashion.csv'):
    """Load the electronics and fashion product datasets from CSV files.

    Args:
        electronics_path: Path to the electronics CSV (default 'electronics.csv',
            matching the original hard-coded behavior).
        fashion_path: Path to the fashion CSV (default 'fashion.csv').

    Returns:
        tuple: (electronics_df, fashion_df) as pandas DataFrames.

    On failure the error is logged, surfaced in the Streamlit UI, and the
    script run is halted via st.stop().
    """
    try:
        electronics_df = pd.read_csv(electronics_path)
        fashion_df = pd.read_csv(fashion_path)
        return electronics_df, fashion_df
    # Catch the errors read_csv actually raises (missing/unreadable file,
    # malformed or empty CSV) instead of a blanket Exception.
    except (OSError, pd.errors.ParserError, pd.errors.EmptyDataError) as e:
        logging.error(f"Error loading datasets: {e}")
        st.error("Error loading datasets.")
        st.stop()
electronics_df, fashion_df = load_datasets()
# Keywords for routing queries
# Checked as case-insensitive substrings of the user query by
# determine_category(); the electronics list is tested first.
electronics_keywords = [
    'electronics', 'device', 'gadget', 'battery', 'performance',
    'phone', 'mobile', 'laptop', 'tv', 'bluetooth', 'speakers',
    'washing machine', 'headphones', 'camera', 'tablet', 'charger',
    'smartwatch', 'refrigerator'
]
# Fashion-related terms; a query matching neither list falls through
# to the 'general' category and is answered directly by the LLM.
fashion_keywords = [
    'fashion', 'clothing', 'size', 'fit', 'material', 'shirt',
    'pants', 'coats', 'shoes', 'girls dress', 'sarees', 'skirts',
    'jackets', 'sweaters', 'suits', 'accessories', 't-shirts'
]
# LLM-based function
def query_llm(prompt):
    """Query the LLM and return only the newly generated text.

    Args:
        prompt: The user prompt sent to the text-generation pipeline.

    Returns:
        str: The model's continuation with the echoed prompt removed, or a
        fallback apology message if the pipeline raises.
    """
    try:
        responses = llm_pipeline(prompt, max_length=150, num_return_sequences=1)
        text = responses[0]['generated_text']
        # text-generation pipelines include the prompt at the start of
        # 'generated_text' by default, so the chatbot would echo the user's
        # own query back at them — drop that prefix before returning.
        if text.startswith(prompt):
            text = text[len(prompt):]
        return text.strip()
    # Broad catch is deliberate here: any inference failure (OOM, tokenizer
    # error, etc.) should degrade to a friendly message, not crash the UI.
    except Exception as e:
        logging.error(f"Error querying the LLM: {e}")
        return "Sorry, I'm having trouble processing your request right now."
def determine_category(query):
    """Classify a user query as 'electronics', 'fashion', or 'general'.

    Performs a case-insensitive substring match against the module-level
    keyword lists; electronics is checked before fashion, and anything
    matching neither list is labeled 'general'.
    """
    lowered = query.lower()
    for label, keywords in (
        ('electronics', electronics_keywords),
        ('fashion', fashion_keywords),
    ):
        if any(word in lowered for word in keywords):
            logging.debug(f"Query '{query}' categorized as '{label}'.")
            return label
    logging.debug(f"Query '{query}' categorized as 'general'.")
    return 'general'
# Fetch response based on query
def get_response(user_input):
    """Determine the category and fetch the appropriate response.

    Args:
        user_input: Raw text typed by the user.

    Returns:
        str: A canned greeting for hi/hello messages; otherwise an
        LLM-generated answer (electronics/fashion queries get a domain
        hint prepended to the prompt).
    """
    # Word-level greeting check: the previous substring test
    # ('hi' in user_input.lower()) also fired on words like 'shirt' or 'this'.
    words = {w.strip('.,!?') for w in user_input.lower().split()}
    if 'hi' in words or 'hello' in words:
        return "Hi, welcome to the customer support chatbot. How can I help you?"
    category = determine_category(user_input)
    # electronics_response()/fashion_response() were called here but are not
    # defined anywhere in this file, so those branches raised NameError at
    # runtime. Route every category through the LLM instead, adding a domain
    # hint for the specialized ones.
    if category in ('electronics', 'fashion'):
        response = query_llm(f"You are a {category} customer support assistant. {user_input}")
    else:
        # General queries go to the LLM unchanged.
        response = query_llm(user_input)
    return response
# Streamlit Interface remains the same
def main():
    """Render the Streamlit chat UI and dispatch user messages to the bot."""
    st.title("Customer Support Chatbot")
    # Chat history must live in session state to survive Streamlit reruns.
    if 'chat_history' not in st.session_state:
        st.session_state.chat_history = []
    user_input = st.text_input("Type your message here:")
    # Short-circuit: the button is rendered either way, but an empty
    # input produces no new exchange.
    if st.button("Send") and user_input:
        reply = get_response(user_input)
        history = st.session_state.chat_history
        history.append({"role": "user", "content": user_input})
        history.append({"role": "assistant", "content": reply})
    # Re-render the full conversation on every run.
    for entry in st.session_state.chat_history:
        role = entry['role']
        content = entry['content']
        st.markdown(f"{role.capitalize()}: {content}")


if __name__ == "__main__":
    main()
|