import streamlit as st
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Load the fine-tuned model and tokenizer at startup
model_name = "EbukaGaus/EbukaMBert"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# Define the label mapping
label_mapping = {0: 'Neutral', 1: 'Negative', 2: 'Positive'}


# Define a function to predict sentiment
def predict_sentiment(text: str):
    # Tokenise the input text
    inputs = tokenizer(text, return_tensors="pt")

    # Run inference without tracking gradients
    with torch.no_grad():
        outputs = model(**inputs)

    # Apply softmax to get probabilities
    probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)

    # Get the most likely class
    predicted_class = torch.argmax(probabilities, dim=1).item()

    # Map the predicted class to the sentiment label
    predicted_label = label_mapping[predicted_class]

    # Retrieve the confidence score
    confidence = probabilities[0][predicted_class].item()

    return predicted_label, confidence


def main():
    st.title("Sentiment Analysis App")

    # Text input
    text_input = st.text_area("Enter your text here", "")

    # Predict sentiment when the button is clicked
    if st.button("Predict Sentiment"):
        if text_input.strip() == "":
            st.warning("Please enter some text first.")
        else:
            sentiment, confidence = predict_sentiment(text_input)
            st.write(f"**Sentiment:** {sentiment}")
            st.write(f"**Confidence:** {confidence:.2f}")


if __name__ == "__main__":
    main()