Anushree1 committed
Commit 708a82d · verified · Parent: ba29a8a

Update app.py

Files changed (1): app.py +9 -12
app.py CHANGED
@@ -1,12 +1,8 @@
-
 import gradio as gr
 import cv2
 from deepface import DeepFace
 import numpy as np
 
-# For Colab-specific display
-from google.colab.patches import cv2_imshow
-
 # Function to predict emotion from an image
 def predict_emotion_image(image):
     try:
@@ -34,6 +30,11 @@ def predict_emotion_video(video):
     # Initialize variables to store detected emotions
     detected_emotions = []
 
+    # Define codec and create VideoWriter object to save the output video
+    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # Codec for .mp4
+    output_filename = "output_video.mp4"
+    out = cv2.VideoWriter(output_filename, fourcc, 20.0, (int(cap.get(3)), int(cap.get(4))))
+
     # Processing each frame of the video
     while True:
         ret, frame = cap.read()
@@ -62,16 +63,12 @@ def predict_emotion_video(video):
         except Exception as e:
             print(f"Error analyzing face in video: {e}")
 
-        # Display the frame in Colab
-        cv2_imshow(frame)
-
-        # Break the loop if 'q' is pressed (ignored in Colab)
-        if cv2.waitKey(1) & 0xFF == ord('q'):
-            break
-
+        # Write the frame with the detection to the output video
+        out.write(frame)
+
     # Release video resources
     cap.release()
-    cv2.destroyAllWindows()
+    out.release()
 
     # Combine detected emotions into a result string
     result_text = "Video Emotion Results: " + ", ".join(detected_emotions) if detected_emotions else "No emotion detected."