# -*- coding: utf-8 -*-
"""Emotion Recognition - Fine-Tuning"""

import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
import gradio as gr

# Load the fine-tuned model and the tokenizer of the base checkpoint it was trained from
loaded_model = TFAutoModelForSequenceClassification.from_pretrained("dhruvsaxena11/emoton_model_dhruv")
loaded_tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")

# Emotion labels, in the order of the model's output logits
labels = ["sadness", "joy", "love", "anger", "fear", "surprise"]


# Define prediction function
def predict_emotion(text):
    """Tokenize the input text and return a label -> probability mapping."""
    text_token = loaded_tokenizer(text, padding=True, truncation=True, return_tensors="tf")
    outputs = loaded_model(text_token)
    # Convert logits to probabilities over the six emotion classes
    probabilities = tf.nn.softmax(outputs.logits, axis=-1)
    # Gradio's Label component expects plain Python floats
    return {label: float(prob) for label, prob in zip(labels, probabilities.numpy()[0])}


# Set up the Gradio interface
inputs = gr.Textbox(lines=1, label="Input Text")
outputs = gr.Label(num_top_classes=6)

interface = gr.Interface(
    fn=predict_emotion,
    inputs=inputs,
    outputs=outputs,
    title="Emotion Recognition in Text - NLP",
    description="Enter a text input to predict the emotion",
)

# Launch the interface with a public share link
interface.launch(share=True)
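
# Optional local sanity check (not part of the app): uncomment to confirm that the
# model loads and that predict_emotion returns one probability per label before
# relying on the web UI. The sample sentence below is illustrative only.
# print(predict_emotion("I was overjoyed when I heard the news"))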