dhruvsaxena11 committed
Commit 429d7d7 · verified · 1 Parent(s): ad3307b

Update app.py

Files changed (1)
  1. app.py +25 -35
app.py CHANGED
@@ -1,46 +1,36 @@
  # -*- coding: utf-8 -*-
- """Emotion Recognition_Fine Tuning
-
- Automatically generated by Colab.
-
- Original file is located at
-     https://colab.research.google.com/drive/1pZgt5n6943GB5oq_h43LjAYoA4yi-EST
- """
-
-
- """Our Application"""
-
+ """Emotion Recognition_Fine Tuning"""

  import numpy as np
-
- import tensorflow as tf # Apply softmax using tf.nn.softmax
-
- # Load the fine-tuned model from the saved directory
- # Load model directly
+ import tensorflow as tf
  from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
+ import gradio as gr

+ # Load the fine-tuned model and tokenizer
  loaded_model = TFAutoModelForSequenceClassification.from_pretrained("dhruvsaxena11/emoton_model_dhruv")
- # loaded_model = TFBertForSequenceClassification.from_pretrained("https://huggingface.co/spaces/dhruvsaxena11/Emotion_Recognition_in_Text/blob/main/tf_model.h5")
- loaded_tokenizer=AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
-
- def predict_emotion(text):
-
-     text_token=loaded_tokenizer(text,padding=True,return_tensors="np")
-     outputs=loaded_model(text_token)
-     probabilities = tf.nn.softmax(outputs.logits)
-     final=probabilities.numpy()
-     labels=["sadness","joy","love","anger","fear","surprise"]
-     final=final.tolist()
-     result_dict = {k: v for k, v in zip(labels,final[0])}
-     return result_dict
+ loaded_tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")

- predict_emotion("dhruv")
-
- my_labels=["sadness","joy","love","anger","fear","surprise"]
+ # Define labels
+ labels = ["sadness", "joy", "love", "anger", "fear", "surprise"]

+ # Define prediction function
+ def predict_emotion(text):
+     text_token = loaded_tokenizer(text, padding=True, return_tensors="tf")
+     outputs = loaded_model(text_token)
+     probabilities = tf.nn.softmax(outputs.logits, axis=-1)
+     result_dict = {k: v for k, v in zip(labels, probabilities.numpy()[0])}
+     return result_dict

- import gradio as gr
+ # Set up Gradio interface
  inputs = gr.Textbox(lines=1, label="Input Text")
  outputs = gr.Label(num_top_classes=6)
- interface = gr.Interface(fn=predict_emotion, inputs=inputs, outputs=outputs,title="Emotion Recognition in Text - NLP")
- interface.launch()
+ interface = gr.Interface(
+     fn=predict_emotion,
+     inputs=inputs,
+     outputs=outputs,
+     title="Emotion Recognition in Text - NLP",
+     description="Enter a text input to predict the emotion"
+ )
+
+ # Launch interface
+ interface.launch(share=True)
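
The updated predict_emotion tokenizes with return_tensors="tf" and applies softmax along the last axis, so it returns a dict mapping the six labels to probabilities, which is the format gr.Label displays. Below is a minimal usage sketch for calling the function directly, assuming the definitions from the updated app.py are loaded in the same session; the sample sentence is only illustrative.

# Minimal sketch: call the prediction function outside the Gradio UI.
# Assumes loaded_model, loaded_tokenizer, labels and predict_emotion from
# the updated app.py are already defined; the sample sentence is illustrative.
scores = predict_emotion("I can't believe how well this turned out!")
top_label = max(scores, key=scores.get)  # label with the highest probability
print(top_label, float(scores[top_label]))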