maxinethegreat committed on
Commit
fd876cd
·
1 Parent(s): f1d5a11

try to reduce lag

Browse files
Files changed (1) hide show
  1. app.py +15 -15
app.py CHANGED
@@ -13,20 +13,20 @@ face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_fronta
13
  emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
14
 
15
  # Define the predict_emotion function
16
- # def predict_emotion(frame):
17
- # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
18
- # faces = face_cascade.detectMultiScale(gray, 1.3, 5)
19
- # for (x, y, w, h) in faces:
20
- # face = gray[y:y+h, x:x+w]
21
- # face = cv2.resize(face, (48, 48))
22
- # face = np.expand_dims(face, axis=-1)
23
- # face = np.expand_dims(face, axis=0)
24
- # prediction = model.predict(face)
25
- # emotion = emotions[np.argmax(prediction)]
26
- # cv2.putText(frame, emotion, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
27
- # cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
28
-
29
- # return frame
30
 
31
  # Start the video capture and emotion detection
32
  # cap = cv2.VideoCapture(0)
@@ -49,7 +49,7 @@ output_image = gr.Image( type="pil", label="Detected Emotion" )
49
 
50
 
51
  iface = gr.Interface(
52
- # fn = predict_emotion,
53
  inputs=input_image,
54
  outputs=output_image,
55
  batch = True,
 
13
  emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
14
 
15
  # Define the predict_emotion function
16
+ def predict_emotion(frame):
17
+ gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
18
+ # faces = face_cascade.detectMultiScale(gray, 1.3, 5)
19
+ # for (x, y, w, h) in faces:
20
+ # face = gray[y:y+h, x:x+w]
21
+ # face = cv2.resize(face, (48, 48))
22
+ # face = np.expand_dims(face, axis=-1)
23
+ # face = np.expand_dims(face, axis=0)
24
+ # prediction = model.predict(face)
25
+ # emotion = emotions[np.argmax(prediction)]
26
+ # cv2.putText(frame, emotion, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
27
+ # cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
28
+
29
+ return gray
30
 
31
  # Start the video capture and emotion detection
32
  # cap = cv2.VideoCapture(0)
 
49
 
50
 
51
  iface = gr.Interface(
52
+ fn = predict_emotion,
53
  inputs=input_image,
54
  outputs=output_image,
55
  batch = True,