import gradio as gr
import transformers
import torch

# Load the pre-trained mental health model once at startup rather than on
# every request, so each chat turn only runs tokenization and generation.
model_name = "thrishala/mental_health_chatbot"  # Update with your selected model
model = transformers.AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)

# Process user input and generate a response
def chatbot(input_text):
    inputs = tokenizer(input_text, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=128)  # cap the reply length
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response

# Define the Gradio interface
interface = gr.Interface(fn=chatbot, inputs="text", outputs="text")

# Launch the Gradio app
interface.launch()
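
# Optional: if responses come back too short or too repetitive, the generation
# step can be tuned. This is a minimal sketch assuming the same `model` and
# `tokenizer` defined above; do_sample, temperature, and top_p are standard
# transformers generate() parameters, and the values here are illustrative,
# not tuned for this particular model.
def chatbot_sampling(input_text):
    inputs = tokenizer(input_text, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=128,  # cap the length of the generated reply
        do_sample=True,      # sample instead of greedy decoding
        temperature=0.7,     # illustrative value
        top_p=0.9,           # nucleus sampling cutoff
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# To try it, point the interface at this function instead:
# interface = gr.Interface(fn=chatbot_sampling, inputs="text", outputs="text")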