import gradio as gr
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch
import torch.nn.functional as F

# Load MentalBERT model & tokenizer.
# Note: mental/mental-bert-base-uncased is a pretrained encoder checkpoint, so the
# 2-way classification head created here is newly initialized; fine-tune it on
# labeled data before treating the scores as meaningful.
MODEL_NAME = "mental/mental-bert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(
    MODEL_NAME,
    num_labels=2,
    problem_type="single_label_classification"
)

LABELS = {
    "neutral": {"index": 0, "description": "Emotionally balanced or calm"},
    "emotional": {"index": 1, "description": "Showing emotional content"}
}


def analyze_text(text):
    # Tokenize input
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)

    # Get model predictions
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        probs = F.softmax(logits, dim=-1)[0]

    # Map class probabilities to emotion labels
    emotions = {
        label: float(probs[info["index"]])
        for label, info in LABELS.items()
    }
    return emotions


# Create Gradio interface
iface = gr.Interface(
    fn=analyze_text,
    inputs=gr.Textbox(label="Enter text to analyze", lines=3),
    outputs=gr.JSON(label="Emotion Analysis"),
    title="MentalBERT Emotion Analysis",
    description="Analyze the emotional content of text using MentalBERT",
    examples=[
        ["I feel really happy today!"],
        ["I'm feeling quite stressed and overwhelmed"],
        ["The weather is nice outside"]
    ]
)

# Launch the interface
iface.launch()
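
# Optional: analyze_text() can also be called directly, without the Gradio UI
# (a minimal sanity-check sketch; run it before iface.launch(), since launch()
# blocks, and remember the scores only become meaningful after fine-tuning):
#
#     scores = analyze_text("I'm feeling quite stressed and overwhelmed")
#     print(scores)  # {"neutral": <probability>, "emotional": <probability>}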