import gradio as gr
from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
# Load the Blenderbot model and tokenizer
MODEL_NAME = "facebook/blenderbot-3B"
tokenizer = BlenderbotTokenizer.from_pretrained(MODEL_NAME)
model = BlenderbotForConditionalGeneration.from_pretrained(MODEL_NAME)
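# Note: blenderbot-3B has ~3B parameters and needs several GB of memory to load.
# For a lighter local test, the smaller checkpoint "facebook/blenderbot-400M-distill"
# (an alternative suggestion, not part of the original Space) can be swapped into MODEL_NAME.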
def chatbot_response(user_input, chat_history):
    """Generates a response from Blenderbot based on user input."""
    inputs = tokenizer(user_input, return_tensors="pt")
    reply_ids = model.generate(**inputs)
    response = tokenizer.decode(reply_ids[0], skip_special_tokens=True)
    chat_history.append((user_input, response))
    # The Chatbot component renders the full history, so return it for both outputs
    return chat_history, chat_history
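# Optional tweak (not in the original file): generation can be tuned with standard
# transformers generate() arguments, e.g.
#   reply_ids = model.generate(**inputs, max_new_tokens=60, num_beams=4)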
# Set up Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 Blenderbot 3B Chatbot")
    chatbot = gr.Chatbot()
    user_input = gr.Textbox(label="Your message")
    submit_btn = gr.Button("Send")
    clear_btn = gr.Button("Clear Chat")
    chat_state = gr.State([])

    submit_btn.click(chatbot_response, inputs=[user_input, chat_state], outputs=[chatbot, chat_state])
    clear_btn.click(lambda: ([], []), inputs=[], outputs=[chatbot, chat_state])
# Launch the chatbot
demo.launch()
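# To try the app locally (assuming gradio, transformers and torch are installed):
#   pip install gradio transformers torch
#   python app.py
# Gradio then serves the interface at http://127.0.0.1:7860 by default.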