# PhoBert Q&A ChatBot — Streamlit application.
# (The lines "Spaces: / Sleeping / Sleeping" were Hugging Face Spaces UI
# residue from the page scrape, not part of the program.)
import streamlit as st
import json
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline

# --- Page configuration ---
st.set_page_config(page_title="PhoBert Q&A ChatBot")

# Inject CSS: background image for the main view and sidebar, centered logo.
# NOTE(review): url('Background.png') resolves relative to the served page —
# confirm the deployment actually serves this file as a static asset.
st.markdown(
    """
    <style>
    .reportview-container {
        background: url('Background.png');
        background-size: cover;
    }
    .sidebar .sidebar-content {
        background: url('Background.png');
        background-size: cover;
    }
    .header-logo {
        width: 200px;
        display: block;
        margin-left: auto;
        margin-right: auto;
    }
    </style>
    """,
    unsafe_allow_html=True
)

# Logo banner (same static-asset caveat as the CSS above).
st.markdown(
    """
    <div>
        <img src="logo.png" class="header-logo">
    </div>
    """,
    unsafe_allow_html=True
)

st.header("PhoBert Q&A ChatBot")

# Persist the conversation across Streamlit reruns.
if 'chat_history' not in st.session_state:
    st.session_state['chat_history'] = []

# User input widgets.
user_input = st.text_input("Input :", key="input")
submit = st.button("Chat With Bot")

model_path = "minhdang14902/PhoBert_Edu"


@st.cache_resource
def _load_chatbot(path):
    """Load the fine-tuned PhoBERT classifier once per process.

    Streamlit re-executes the whole script on every interaction; without
    caching, the model and tokenizer were re-downloaded/re-loaded on each
    rerun. Returns (model, tokenizer, classification pipeline).
    """
    mdl = AutoModelForSequenceClassification.from_pretrained(path)
    tok = AutoTokenizer.from_pretrained(path)
    return mdl, tok, pipeline("sentiment-analysis", model=mdl, tokenizer=tok)


model, tokenizer, chatbot = _load_chatbot(model_path)
def load_intents_from_txt(input_filename):
    """Parse an intents file where each line is '<key>: <json-value>'.

    Parameters:
        input_filename: path to a UTF-8 text file.

    Returns:
        dict mapping each key (text before the first ': ') to its decoded
        JSON value.

    Blank lines are skipped; the original crashed on them with a
    ValueError from tuple unpacking.
    """
    intents = {}
    with open(input_filename, "r", encoding='utf-8') as f:
        for raw_line in f:
            line = raw_line.strip()
            if not line:
                continue  # tolerate blank/trailing lines
            key, value = line.split(": ", 1)
            intents[key] = json.loads(value)
    return intents
# Load the label <-> id mappings and the intents from their text files.
def _load_mapping_file(filename):
    """Read 'key: value' lines from *filename* into a {str: str} dict.

    Blank lines are skipped. Splits on the first ': ' only, so values may
    themselves contain colons.
    """
    mapping = {}
    with open(filename, "r", encoding='utf-8') as f:
        for raw_line in f:
            line = raw_line.strip()
            if not line:
                continue
            key, value = line.split(": ", 1)
            mapping[key] = value
    return mapping


# id2label: int class index -> label string; label2id is its inverse.
# (The original duplicated this parse loop and shadowed the builtin `id`.)
id2label = {int(k): v for k, v in _load_mapping_file("id2label.txt").items()}
label2id = {k: int(v) for k, v in _load_mapping_file("label2id.txt").items()}

intents = load_intents_from_txt("intents.txt")
def get_response(user_input):
    """Classify *user_input* with the PhoBERT pipeline and return the
    matching canned response from the intents data.

    Side effects: writes the question, the classifier confidence, and the
    resolved intent index to the Streamlit page (debug-style output, kept
    from the original).

    Returns a fallback string when confidence is below the threshold or
    when the predicted label is not present in label2id (the original
    raised KeyError in that case).
    """
    st.subheader("The Answer is:")
    st.write(user_input)  # echo the question back
    result = chatbot(user_input)[0]
    score = result['score']
    st.write(score)  # classifier confidence
    # NOTE(review): 0.001 is an unusually low confidence cutoff for a
    # softmax score — confirm this threshold is intentional.
    if score < 0.001:
        return "Sorry, I can't answer that"
    # label2id maps the predicted label string to an integer intent index.
    intent_id = label2id.get(result['label'])
    if intent_id is None:
        # Unknown label: fail gracefully instead of raising KeyError.
        return "Sorry, I can't answer that"
    st.write(intent_id)
    return intents['intents'][intent_id]['responses']
# Handle a submitted question: record it, stream the bot's reply with a
# typing effect, then append the reply to the history.
if submit and user_input:
    st.session_state['chat_history'].append(("User", user_input))
    response = get_response(user_input)
    st.subheader("The Response is:")
    # Placeholder updated in-place to simulate typing. Renamed from
    # `message`, which the original also reused as the history-loop
    # variable below.
    placeholder = st.empty()
    streamed = ""
    # Iterating a string yields characters; a list of strings yields its
    # items — both accumulate into the displayed text.
    for chunk in response:
        streamed += chunk
        # NOTE(review): "β " looks like a mis-encoded typing-cursor glyph
        # (possibly "▌") — kept byte-for-byte; confirm the intended char.
        placeholder.markdown(streamed + "β ")
    placeholder.markdown(streamed)
    st.session_state['chat_history'].append(("Bot", streamed))

# Render the full conversation so far.
# NOTE(review): source indentation was stripped; this loop is placed at top
# level (the conventional Streamlit pattern, so history persists across
# reruns) — confirm it was not meant to run only on submit.
for i, (sender, text) in enumerate(st.session_state['chat_history']):
    if sender == "User":
        st.text_area(f"User {i}:", value=text, height=100, max_chars=None, key=f"user_{i}")
    else:
        st.text_area(f"Bot {i}:", value=text, height=100, max_chars=None, key=f"bot_{i}")