Anushree1 committed (verified)
Commit 6ed2594 · 1 Parent(s): c3eaea2

Upload app.py

Files changed (1)
  1. app.py +125 -0
app.py ADDED
import gradio as gr
import cv2
from deepface import DeepFace
import numpy as np

# Colab-specific display helper; fall back to a no-op when google.colab is
# unavailable (e.g. when the app runs headless) so the import doesn't crash
try:
    from google.colab.patches import cv2_imshow
except ImportError:
    def cv2_imshow(frame):
        pass  # no display available; skip frame preview

# Function to predict emotion from an image
def predict_emotion_image(image):
    try:
        # Convert the Gradio image (PIL format) to an OpenCV BGR image
        img = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

        # Analyze the emotion using DeepFace
        result = DeepFace.analyze(img, actions=['emotion'], enforce_detection=False)

        # DeepFace.analyze returns a list of results; take the first face
        dominant_emotion = result[0]['dominant_emotion']

        return f"Detected Emotion: {dominant_emotion}"
    except Exception as e:
        return f"Error in image emotion detection: {str(e)}"

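# For reference, DeepFace.analyze with actions=['emotion'] returns a list
# with one dict per detected face; illustrative (not exact) values:
#   [{'emotion': {'angry': 0.1, 'happy': 92.3, 'sad': 1.2, ...},
#     'dominant_emotion': 'happy',
#     'region': {'x': 34, 'y': 40, 'w': 120, 'h': 120}}]
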
# Function to process a video, detect faces, and predict emotions
def predict_emotion_video(video):
    try:
        cap = cv2.VideoCapture(video)

        if not cap.isOpened():
            return "Error: Unable to open video."

        # Load the Haar cascade once, rather than once per frame
        face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

        # Collect the dominant emotion detected for each face in each frame
        detected_emotions = []

        # Process each frame of the video
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            # Convert the frame to grayscale for face detection
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

            for (x, y, w, h) in faces:
                face = frame[y:y+h, x:x+w]

                try:
                    result = DeepFace.analyze(face, actions=['emotion'], enforce_detection=False)
                    dominant_emotion = result[0]['dominant_emotion']

                    # Record the detected emotion for the current face
                    detected_emotions.append(dominant_emotion)

                    # Draw a rectangle and label around the face
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    cv2.putText(frame, dominant_emotion, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2)

                except Exception as e:
                    print(f"Error analyzing face in video: {e}")

            # Show the annotated frame (a no-op outside Colab); GUI calls such
            # as cv2.waitKey are unavailable in headless environments, so the
            # loop simply runs to the end of the video
            cv2_imshow(frame)

        # Release video resources
        cap.release()

        # Combine detected emotions into a result string
        result_text = ("Video Emotion Results: " + ", ".join(detected_emotions)) if detected_emotions else "No emotion detected."

        return result_text

    except Exception as e:
        return f"Error in video emotion detection: {str(e)}"

# Function to handle the selected mode (image or video)
def process_choice(choice, image=None, video=None):
    if choice == "Image Emotion Detection":
        return predict_emotion_image(image)
    elif choice == "Video Emotion Detection":
        return predict_emotion_video(video)
    else:
        return "Please select a valid option."

# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("<h1 style='color: #e91e63;'>Image and Video Emotion Recognition</h1>")

    # Dropdown to select between image and video detection
    choice = gr.Dropdown(["Image Emotion Detection", "Video Emotion Detection"], label="Choose Mode")

    # Inputs for image or video (hidden until a mode is chosen)
    image_input = gr.Image(type="pil", label="Upload Image", visible=False)
    video_input = gr.Video(label="Upload Video", visible=False)

    # Show/hide the inputs based on the selected mode
    def update_input(choice):
        if choice == "Image Emotion Detection":
            return gr.update(visible=True), gr.update(visible=False)
        elif choice == "Video Emotion Detection":
            return gr.update(visible=False), gr.update(visible=True)
        return gr.update(visible=False), gr.update(visible=False)

    # Update visibility of the inputs when the dropdown changes
    choice.change(fn=update_input, inputs=choice, outputs=[image_input, video_input])

    # Output
    result_output = gr.Textbox(label="Emotion Detection Result")

    # Button to run the analysis
    submit_btn = gr.Button("Analyze Emotion")

    # Connect the button to the dispatch function
    submit_btn.click(fn=process_choice, inputs=[choice, image_input, video_input], outputs=result_output)

# Launch the Gradio app
demo.launch()
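
# Quick local smoke test for the two handlers above (left commented out so it
# does not run in the deployed app); face.jpg and clip.mp4 are hypothetical
# local files, and gr.Image(type="pil") hands the handler a PIL image:
#
#   from PIL import Image
#   print(predict_emotion_image(Image.open("face.jpg")))   # e.g. "Detected Emotion: happy"
#   print(predict_emotion_video("clip.mp4"))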