Update app.py
app.py
CHANGED
@@ -1,127 +1,63 @@
 import gradio as gr
-import numpy as np
-import cv2
-import librosa
-import tempfile
-import wave
-import os
-import speech_recognition as sr
-import pickle
-import json
-from tensorflow.keras.models import load_model
-from tensorflow.keras.preprocessing.text import tokenizer_from_json
-from tensorflow.keras.preprocessing.sequence import pad_sequences
-import nltk
-from collections import Counter
 from transformers import LlamaTokenizer, LlamaForCausalLM
 
-# Initialize
-# Load the tokenizer and model for text-based emotion prediction
-with open('tokenizer.json') as json_file:
-    tokenizer_json = json.load(json_file)
-tokenizer = tokenizer_from_json(tokenizer_json)
-text_model = load_model('model_for_text_emotion_updated(1).keras')
-
-# Load the audio emotion model and scaler
-with open('encoder.pkl', 'rb') as file:
-    encoder = pickle.load(file)
-with open('scaler.pkl', 'rb') as file:
-    scaler = pickle.load(file)
-audio_model = load_model('my_model.h5')
-
-# Load the LLaMA model for question answering
 llama_tokenizer = LlamaTokenizer.from_pretrained('huggingface/llama-7b')
 llama_model = LlamaForCausalLM.from_pretrained('huggingface/llama-7b')
 
-#
-
-
-
-lemmatizer = nltk.WordNetLemmatizer()
-stop_words = set(nltk.corpus.stopwords.words('english'))
-
-# Preprocess text for emotion prediction
-def preprocess_text(text):
-    tokens = nltk.word_tokenize(text.lower())
-    tokens = [word for word in tokens if word.isalnum() and word not in stop_words]
-    lemmatized_tokens = [lemmatizer.lemmatize(word) for word in tokens]
-    return ' '.join(lemmatized_tokens)
-
-# Extract audio features and predict emotion
-def extract_audio_features(data, sample_rate):
-    result = np.array([])
-    zcr = np.mean(librosa.feature.zero_crossing_rate(y=data).T, axis=0)
-    result = np.hstack((result, zcr))
-    mfcc = np.mean(librosa.feature.mfcc(y=data, sr=sample_rate).T, axis=0)
-    result = np.hstack((result, mfcc))
-    return result
-
-def predict_emotion_from_audio(audio_data):
-    sample_rate, data = audio_data
-    features = extract_audio_features(data, sample_rate)
-    features = np.expand_dims(features, axis=0)
-    scaled_features = scaler.transform(features)
-    prediction = audio_model.predict(scaled_features)
-    emotion_index = np.argmax(prediction)
-    emotion_array = np.zeros((1, len(encoder.categories_[0])))
-    emotion_array[0, emotion_index] = 1
-    emotion_label = encoder.inverse_transform(emotion_array)[0]
-    return emotion_label
-
-# Extract text from audio (speech recognition)
-def extract_text_from_audio(audio_path):
-    recognizer = sr.Recognizer()
-    with sr.AudioFile(audio_path) as source:
-        audio_data = recognizer.record(source)
-        text = recognizer.recognize_google(audio_data)
-    return text
-
-# Use LLaMA to answer questions based on the text
-def ask_llama(question, context):
-    inputs = llama_tokenizer(question, context, return_tensors="pt")
-    outputs = llama_model.generate(inputs['input_ids'], max_length=150)
-    answer = llama_tokenizer.decode(outputs[0], skip_special_tokens=True)
-    return answer
-
-# Process the video and extract text, emotion, and context for LLaMA
-def process_video(video_path):
-    # Extract audio from the video
-    video = mp.VideoFileClip(video_path)
-    if video.audio is None:
-        raise ValueError("No audio found in the video.")
 
-    audio
-
-    temp_audio_path = temp_audio_file.name
-    audio.write_audiofile(temp_audio_path)
-
-    # Extract text from the audio
-    video_text = extract_text_from_audio(temp_audio_path)
 
-    #
-
-    title_seq = tokenizer.texts_to_sequences([preprocessed_text])
-    padded_title_seq = pad_sequences(title_seq, maxlen=35, padding='post', truncating='post')
-    text_emotion_prediction = text_model.predict(np.array(padded_title_seq))
-    text_emotion = ['anger', 'disgust', 'fear', 'joy', 'neutral', 'sadness', 'surprise'][np.argmax(text_emotion_prediction)]
 
-
-    audio_emotion = predict_emotion_from_audio((audio.fps, audio_data))
 
-
-
-
 
-    #
-    def
-
-
-    return f"Text Emotion: {text_emotion}, Audio Emotion: {audio_emotion}\nAnswer: {answer}"
 
-iface = gr.Interface(fn=
-                     inputs=[
                      outputs="text",
-                     title="
-                     description="
 
 iface.launch()
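For context on the pickled artifacts the removed audio path loads: the calls to scaler.transform, encoder.categories_, and encoder.inverse_transform suggest, but do not confirm, scikit-learn's StandardScaler and OneHotEncoder. A minimal sketch of how encoder.pkl and scaler.pkl could be produced, with placeholder training arrays standing in for the real feature matrix and labels:

import pickle
import numpy as np
from sklearn.preprocessing import OneHotEncoder, StandardScaler

# Placeholder training data: one row of audio features per clip (1 ZCR mean +
# 20 MFCC means with librosa's defaults = 21 values) and one emotion label per
# clip. Real data would come from a labelled speech-emotion corpus.
X_train = np.random.rand(100, 21)
y_train = np.random.choice(['anger', 'joy', 'neutral', 'sadness'], size=(100, 1))

scaler = StandardScaler().fit(X_train)   # later used via scaler.transform(features)
encoder = OneHotEncoder().fit(y_train)   # later used via encoder.categories_ / inverse_transform

with open('scaler.pkl', 'wb') as f:
    pickle.dump(scaler, f)
with open('encoder.pkl', 'wb') as f:
    pickle.dump(encoder, f)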
 import gradio as gr
 from transformers import LlamaTokenizer, LlamaForCausalLM
+import tempfile
+import numpy as np
 
+# Initialize LLaMA Model for Question Answering
 llama_tokenizer = LlamaTokenizer.from_pretrained('huggingface/llama-7b')
 llama_model = LlamaForCausalLM.from_pretrained('huggingface/llama-7b')
 
+# Updated transcribe_and_predict_video function from your code
+def transcribe_and_predict_video(video):
+    # Process video frames for image-based emotion recognition
+    image_emotion = process_video(video)
 
+    # Process audio for text and audio-based emotion recognition
+    text_emotion, audio_emotion = process_audio_from_video(video)
 
+    # Determine the overall emotion (could be based on majority vote or some other logic)
+    overall_emotion = Counter([text_emotion, audio_emotion, image_emotion]).most_common(1)[0][0]
 
+    return overall_emotion
 
+# Emotion-aware Question Answering with LLM
+def emotion_aware_qa(question, video):
+    # Get the emotion from the video (this uses the emotion detection you already implemented)
+    detected_emotion = transcribe_and_predict_video(video)
+
+    # Create a custom response context based on the detected emotion
+    if detected_emotion == 'joy':
+        emotion_context = "You're in a good mood! Let's keep the positivity going."
+    elif detected_emotion == 'sadness':
+        emotion_context = "It seems like you're feeling a bit down. Let me help with that."
+    elif detected_emotion == 'anger':
+        emotion_context = "I sense some frustration. Let's work through it together."
+    elif detected_emotion == 'fear':
+        emotion_context = "It sounds like you're anxious. How can I assist in calming things down?"
+    elif detected_emotion == 'neutral':
+        emotion_context = "You're feeling neutral. How can I help you today?"
+    else:
+        emotion_context = "You're in an uncertain emotional state. Let me guide you."
+
+    # Prepare the prompt for LLaMA, including emotion context and user question
+    prompt = f"{emotion_context} User asks: {question}"
+
+    # Tokenize and generate response from LLaMA
+    inputs = llama_tokenizer(prompt, return_tensors="pt")
+    outputs = llama_model.generate(inputs['input_ids'], max_length=150)
+    answer = llama_tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+    return answer
 
+# Create Gradio interface to interact with the LLM and video emotion detection
+def gradio_interface(question, video):
+    response = emotion_aware_qa(question, video)
+    return response
 
+iface = gr.Interface(fn=gradio_interface,
+                     inputs=["text", gr.Video()],
                      outputs="text",
+                     title="Emotion-Aware Question Answering",
+                     description="Ask a question and get an emotion-aware response based on the video.")
 
 iface.launch()
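As committed, the new app.py still uses Counter for the majority vote and calls process_video and process_audio_from_video, but the diff removes from collections import Counter along with the old helper definitions and adds no replacements, so the Space will fail at import or at call time. A minimal sketch of the missing pieces, with the two helpers reduced to clearly hypothetical placeholders (real implementations would restore the removed audio, text, and frame-based pipelines):

from collections import Counter  # required by the majority vote in transcribe_and_predict_video

# Hypothetical stand-ins for the helpers the new code calls but no longer defines.
def process_video(video_path):
    # Placeholder: a real version would run a frame-level emotion model (e.g. with cv2).
    return 'neutral'

def process_audio_from_video(video_path):
    # Placeholder: a real version would transcribe the audio and score both
    # text-based and audio-based emotion, returning (text_emotion, audio_emotion).
    return 'neutral', 'neutral'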
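A small optional smoke test, assuming the helpers above are defined and a hypothetical short clip named sample.mp4 sits next to app.py; it exercises emotion_aware_qa once and would be placed before the blocking iface.launch() call:

if __name__ == "__main__":
    # Ask one emotion-aware question against a local sample video.
    print(emotion_aware_qa("How should I plan the rest of my day?", "sample.mp4"))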