# -*- coding: utf-8 -*-
"""Emotion Recognition_Fine Tuning"""
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
import gradio as gr
# Load the fine-tuned model and tokenizer
loaded_model = TFAutoModelForSequenceClassification.from_pretrained("dhruvsaxena11/emoton_model_dhruv")
loaded_tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
# Define labels
labels = ["sadness", "joy", "love", "anger", "fear", "surprise"]
# Define prediction function
def predict_emotion(text):
    # Tokenize the input (truncation guards against inputs longer than BERT's 512-token limit)
    text_token = loaded_tokenizer(text, padding=True, truncation=True, return_tensors="tf")
    outputs = loaded_model(text_token)
    # Convert logits into a probability distribution over the six emotions
    probabilities = tf.nn.softmax(outputs.logits, axis=-1)
    # Map each label to a plain Python float so Gradio can display it
    return {label: float(prob) for label, prob in zip(labels, probabilities.numpy()[0])}
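# Illustrative usage: predict_emotion("I can't wait for this trip!") returns a
# dict mapping each of the six labels to its softmax probability (summing to ~1).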
# Set up Gradio interface
inputs = gr.Textbox(lines=1, label="Input Text")
outputs = gr.Label(num_top_classes=6)
interface = gr.Interface(
    fn=predict_emotion,
    inputs=inputs,
    outputs=outputs,
    title="Emotion Recognition in Text - NLP",
    description="Enter a piece of text to predict the emotion it expresses.",
)
# Launch interface
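# share=True additionally exposes a temporary public Gradio link alongside the local server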
interface.launch(share=True)