import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the model and tokenizer from Hugging Face
model_name = "fahadMizan/chatbot"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Define the chatbot function
def chatbot(input_text):
    # Tokenize the user input and generate a continuation
    inputs = tokenizer(input_text, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_length=100,
            pad_token_id=tokenizer.eos_token_id,
        )
    # Decode the generated tokens back into text
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response

# Create the Gradio interface
iface = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="text",
    title="Fahad Chatbot",
    description="A chatbot using the 'fahadMizan/chatbot' model from Hugging Face.",
)

# Launch the app
if __name__ == "__main__":
    iface.launch()
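Once the Space is up, the same text-in/text-out endpoint can also be queried programmatically. A minimal sketch, assuming the gradio_client package is installed and that the Space id matches the model repo name "fahadMizan/chatbot" (that id is an assumption, not confirmed by the code above):

from gradio_client import Client

# Connect to the hosted Space (Space id assumed; a full URL also works)
client = Client("fahadMizan/chatbot")

# gr.Interface exposes a single default endpoint named "/predict"
reply = client.predict("Hello, how are you?", api_name="/predict")
print(reply)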