maxinethegreat committed
Commit 5fc096b · 1 parent: 0038cec

try diff image preprocessing before predictions

Files changed (1):
  1. app.py (+29 -14)
app.py CHANGED
@@ -6,25 +6,40 @@ import numpy as np
 
 
 # Load the saved model
-model = tf.keras.models.load_model('model/cnn_9_layer_model.h5')
+model = tf.keras.models.load_model('model/model.h5')
 
 # Define the face cascade and emotions
 face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
 emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
 
+no_face_detection_alert = "Cannot Detect Face"
+low_confidence_alert = "Cannot Detect Emotion"
+
 # Define the predict_emotion function
-def predict_emotion(frame):
-    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
-    for (x, y, w, h) in faces:
-        face = gray[y:y+h, x:x+w]
-        face = cv2.resize(face, (48, 48))
-        face = np.expand_dims(face, axis=-1)
-        face = np.expand_dims(face, axis=0)
-        prediction = model.predict(face)
-        emotion = emotions[np.argmax(prediction)]
-        cv2.putText(frame, emotion, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
-        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
+    for (x, y, w, h) in faces:
+        face = gray[y:y+h, x:x+w]
+        face = cv2.resize(face, (48, 48), interpolation = cv2.INTER_AREA)
+        if np.sum([face])!=0:
+            face_test = face.astype('float')/255.0
+            face_test = tf.keras.utils.img_to_array(face_test)
+            face_test = np.expand_dims(face_test, axis=0)
+            prediction = model.predict(face_test)
+
+
+            if any(prob >.5 for prob in prediction[0]):
+
+                emotion = emotions[np.argmax(prediction)]
+
+                cv2.putText(frame, emotion, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (128, 128, 0), 2)
+                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 0), 2)
+
+            else:
+                cv2.putText(frame, low_confidence_alert, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 69, 255),
+                            2)
+
+        else:
+            cv2.putText(frame, no_face_detection_alert, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 69, 255), 2)
+
 
     return frame
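Note: as rendered, the new hunk drops the def predict_emotion(frame): header along with the gray/faces setup lines, which would leave frame, gray, and faces undefined at the top of the loop. A minimal runnable sketch of the updated function, with those three lines restored from the old version, follows; the restoration is an assumption, not part of this commit.

import cv2
import numpy as np
import tensorflow as tf

model = tf.keras.models.load_model('model/model.h5')
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
no_face_detection_alert = "Cannot Detect Face"
low_confidence_alert = "Cannot Detect Emotion"

def predict_emotion(frame):
    # Restored from the old version (assumed still intended): face detection setup
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        face = gray[y:y+h, x:x+w]
        # INTER_AREA resampling when shrinking the crop to the model's 48x48 input
        face = cv2.resize(face, (48, 48), interpolation=cv2.INTER_AREA)
        if np.sum([face]) != 0:  # guard against an all-black crop
            # New preprocessing: rescale pixels to [0, 1] before inference
            face_test = face.astype('float') / 255.0
            face_test = tf.keras.utils.img_to_array(face_test)  # adds the channel axis -> (48, 48, 1)
            face_test = np.expand_dims(face_test, axis=0)       # adds the batch axis -> (1, 48, 48, 1)
            prediction = model.predict(face_test)
            # Only label the face when some class clears a 0.5 confidence threshold
            if any(prob > .5 for prob in prediction[0]):
                emotion = emotions[np.argmax(prediction)]
                cv2.putText(frame, emotion, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (128, 128, 0), 2)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 0), 2)
            else:
                cv2.putText(frame, low_confidence_alert, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 69, 255), 2)
        else:
            cv2.putText(frame, no_face_detection_alert, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 69, 255), 2)
    return frame

The substantive preprocessing change is the rescale to [0, 1] plus INTER_AREA resampling: the old code passed raw 0-255 grayscale pixels straight to model.predict, so if the model was trained on rescaled inputs this alone can change the predictions.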
 
@@ -44,7 +59,7 @@ def predict_emotion(frame):
 input_image = gr.Image(source = "webcam", streaming = True, label="Your Face")
 # video = gr.inputs.Video(source = "webcam" )
 
-output_image = gr.Image( type="pil", label="Detected Emotion" )
+output_image = gr.Image( type="numpy", label="Detected Emotion" )
 
 
 
 
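The output component switch from type="pil" to type="numpy" lines up with predict_emotion drawing on the frame with OpenCV and returning a NumPy array. A hedged sketch of how these components are presumably wired together; the gr.Interface call sits outside the visible hunks, so everything past the component definitions is an assumption based on the Gradio 3.x-style API the diff uses.

import gradio as gr

input_image = gr.Image(source="webcam", streaming=True, label="Your Face")
# predict_emotion returns the annotated frame as a NumPy array, so
# type="numpy" now matches the return value (type="pil" did not)
output_image = gr.Image(type="numpy", label="Detected Emotion")

# Assumed wiring, not shown in the hunks above
demo = gr.Interface(fn=predict_emotion, inputs=input_image, outputs=output_image, live=True)
demo.launch()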