"""Emotion Recognition_Fine Tuning""" |
|
|
|
import numpy as np
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
import gradio as gr
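
# The demo assumes `transformers`, `tensorflow`, and `gradio` are installed
# (e.g. `pip install transformers tensorflow gradio`); the optional
# fine-tuning sketch below additionally needs `datasets`.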

# Load the fine-tuned classifier from the Hugging Face Hub together with the
# tokenizer of the BERT base model it was trained from.
loaded_model = TFAutoModelForSequenceClassification.from_pretrained("dhruvsaxena11/emoton_model_dhruv")
loaded_tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")

# Label order must match the class indices used during fine-tuning.
labels = ["sadness", "joy", "love", "anger", "fear", "surprise"]
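

# ---------------------------------------------------------------------------
# For reference only: a minimal sketch of how a checkpoint like the one loaded
# above could be fine-tuned. This is not necessarily the exact training code
# behind "dhruvsaxena11/emoton_model_dhruv"; it assumes the public
# "dair-ai/emotion" dataset (whose six classes match `labels` above) and
# illustrative hyperparameters. The function is never called in this script,
# so the demo stays inference-only.
# ---------------------------------------------------------------------------
def finetune_bert_on_emotion(output_dir="emotion_model"):
    from datasets import load_dataset  # `datasets` library, assumed installed

    dataset = load_dataset("dair-ai/emotion")  # splits: train / validation / test
    tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")

    def tokenize(batch):
        return tokenizer(batch["text"], truncation=True)

    tokenized = dataset.map(tokenize, batched=True)

    model = TFAutoModelForSequenceClassification.from_pretrained(
        "google-bert/bert-base-uncased", num_labels=len(labels)
    )

    # Wrap the tokenized splits in tf.data pipelines; padding is applied per
    # batch by the collator that `prepare_tf_dataset` builds from the tokenizer.
    train_ds = model.prepare_tf_dataset(
        tokenized["train"], batch_size=16, shuffle=True, tokenizer=tokenizer
    )
    val_ds = model.prepare_tf_dataset(
        tokenized["validation"], batch_size=16, shuffle=False, tokenizer=tokenizer
    )

    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=2e-5),
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=["accuracy"],
    )
    model.fit(train_ds, validation_data=val_ds, epochs=3)
    model.save_pretrained(output_dir)
    return model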


def predict_emotion(text):
    """Return a dict mapping each emotion label to its predicted probability."""
    # Tokenize into TensorFlow tensors; truncate overly long inputs to the
    # model's maximum sequence length.
    text_token = loaded_tokenizer(text, padding=True, truncation=True, return_tensors="tf")
    outputs = loaded_model(text_token)
    # Turn the raw logits into a probability distribution over the six emotions.
    probabilities = tf.nn.softmax(outputs.logits, axis=-1)
    # Cast to plain Python floats (Gradio's Label takes a {label: confidence} dict).
    result_dict = {label: float(prob) for label, prob in zip(labels, probabilities.numpy()[0])}
    return result_dict
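

# Optional smoke test: classify one example sentence before building the UI.
# The sentence is an arbitrary illustration; the scores it prints depend on
# the fine-tuned checkpoint.
print(predict_emotion("I can't stop smiling today, everything feels wonderful!"))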


# Gradio UI: a single-line text box in, a ranked label widget showing all six
# emotion scores out.
inputs = gr.Textbox(lines=1, label="Input Text")
outputs = gr.Label(num_top_classes=6)
interface = gr.Interface(
    fn=predict_emotion,
    inputs=inputs,
    outputs=outputs,
    title="Emotion Recognition in Text - NLP",
    description="Enter a text input to predict the emotion.",
)


# share=True exposes a temporary public URL in addition to the local server,
# which is useful when the demo runs inside a hosted notebook.
interface.launch(share=True)