# app.py — Mistral-7B-v0.2 chatbot Space (author: Addaci, commit a85a770)
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
# Load the Mistral-7B-v0.2 model and tokenizer once at startup.
# First run downloads the weights from the Hugging Face Hub, which can take
# several minutes and requires enough RAM/VRAM for a 7B-parameter model.
# NOTE(review): confirm the repo id "mistralai/Mistral-7B-v0.2" exists on the
# Hub — the widely published mistralai checkpoints are Mistral-7B-v0.1 and
# Mistral-7B-Instruct-v0.2; from_pretrained will raise if the id is wrong.
model_name = "mistralai/Mistral-7B-v0.2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Define the chatbot function
# Define the chatbot function
def chatbot(input_text, history=None):
    """Generate one assistant reply for a new user message.

    Args:
        input_text: The user's new message.
        history: List of (user, assistant) tuples from earlier turns,
            or None for a fresh conversation.

    Returns:
        (response, history): the model's reply text and the updated
        list of (user, assistant) turns, including this one.
    """
    # BUG FIX: the original default `history=[]` is a mutable default
    # argument — one shared list across every call (and every user session),
    # so conversations would bleed into each other.
    if history is None:
        history = []
    # Flatten prior turns plus the new message into a single prompt string.
    chat_history = " ".join([f"User: {h[0]} Assistant: {h[1]}" for h in history])
    input_prompt = f"{chat_history} User: {input_text} Assistant:"
    inputs = tokenizer(input_prompt, return_tensors="pt", truncation=True)
    # Generate response. BUG FIX: the original `max_length=512` counts the
    # prompt tokens too, so a long history could leave zero room for the
    # reply; `max_new_tokens` bounds only the generated continuation.
    outputs = model.generate(
        **inputs,
        max_new_tokens=512,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
    )
    # Decode and keep only the text after the last "Assistant:" marker.
    response = tokenizer.decode(outputs[0], skip_special_tokens=True).split("Assistant:")[-1].strip()
    # Update history with this turn.
    history.append((input_text, response))
    return response, history
# Gradio Interface
# Gradio interface: a Blocks layout wiring the UI widgets to chatbot().
with gr.Blocks() as interface:
    gr.Markdown("### Mistral-7B-v0.2 Chatbot")
    gr.Markdown("This chatbot is powered by the Mistral-7B-v0.2 model for summarization and general conversation.")
    with gr.Row():
        chatbot_box = gr.Chatbot(label="Chatbot")
    with gr.Row():
        user_input = gr.Textbox(label="Your Input", placeholder="Type your message here...")
        submit_button = gr.Button("Submit")
    with gr.Row():
        reset_button = gr.Button("Reset Chat")

    def _on_submit(message, history):
        """Run one chat turn: update the Chatbot pair list and clear the textbox."""
        # chatbot() returns (response, history); the Chatbot component needs
        # the full (user, assistant) pair list, not the bare response string.
        _, new_history = chatbot(message, history or [])
        return new_history, ""

    # BUG FIX: the original wiring used outputs=[chatbot_box, chatbot_box],
    # sending the raw response string to the Chatbot component (which expects
    # a list of (user, assistant) pairs) and listing the same component twice.
    # Route the updated pair list to the Chatbot and clear the input box.
    submit_button.click(_on_submit, inputs=[user_input, chatbot_box], outputs=[chatbot_box, user_input])
    # Reset simply replaces the Chatbot value with an empty conversation.
    reset_button.click(lambda: [], inputs=[], outputs=[chatbot_box])

# Launch the app (blocks here serving HTTP until the process is stopped).
interface.launch()