import streamlit as st
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
# Load the fine-tuned model and tokenizer at startup
model_name = "EbukaGaus/EbukaMBert"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
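
# Note: Streamlit reruns this whole script on every interaction, so loading the model
# at module level means the load can repeat on each rerun. An optional pattern (a sketch,
# not part of the original app; the helper name is hypothetical) is to cache the load:
#
#   @st.cache_resource
#   def load_sentiment_model(name: str):
#       tok = AutoTokenizer.from_pretrained(name)
#       mdl = AutoModelForSequenceClassification.from_pretrained(name)
#       return tok, mdl
#
#   tokenizer, model = load_sentiment_model(model_name)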
# Define the label mapping
label_mapping = {0: 'Neutral', 1: 'Negative', 2: 'Positive'}
# Define a function to predict sentiment
def predict_sentiment(text: str):
    # Tokenise the input text
    inputs = tokenizer(text, return_tensors="pt")
    # Run inference without tracking gradients
    with torch.no_grad():
        outputs = model(**inputs)
    # Apply softmax to get probabilities
    probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
    # Get the most likely class
    predicted_class = torch.argmax(probabilities, dim=-1).item()
    # Map the predicted class to the sentiment label
    predicted_label = label_mapping[predicted_class]
    # Retrieve the confidence score
    confidence = probabilities[0][predicted_class].item()
    return predicted_label, confidence
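
# Quick sanity check outside the Streamlit UI (a sketch; the example sentence is arbitrary):
#   label, score = predict_sentiment("I really enjoyed this film!")
#   print(label, score)  # one of Neutral/Negative/Positive and a confidence in (0, 1)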
def main():
    st.title("Sentiment Analysis App")
    # Text input
    text_input = st.text_area("Enter your text here", "")
    # Predict sentiment when the button is clicked
    if st.button("Predict Sentiment"):
        if text_input.strip() == "":
            st.warning("Please enter some text first.")
        else:
            sentiment, confidence = predict_sentiment(text_input)
            st.write(f"**Sentiment:** {sentiment}")
            st.write(f"**Confidence:** {confidence:.2f}")


if __name__ == "__main__":
    main()
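
# To run the app locally (assuming this file is saved as app.py, the usual entry point
# for a Streamlit Space), install streamlit, torch and transformers, then run:
#   streamlit run app.py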