LovnishVerma committed on
Commit 7c3eaea · verified · 1 Parent(s): ac63b48

Update app.py

Files changed (1): app.py +59 -51
app.py CHANGED
@@ -5,6 +5,7 @@ import time
 import os
 from keras.models import load_model
 from PIL import Image
+import mediapipe as mp
 import tempfile

 # Larger title
@@ -13,38 +14,40 @@ st.markdown("<h1 style='text-align: center;'>Emotion Detection with Face Recogni
 # Smaller subtitle
 st.markdown("<h3 style='text-align: center;'>angry, fear, happy, neutral, sad, surprise</h3>", unsafe_allow_html=True)

+# Start time for measuring performance
 start = time.time()

-# Load the emotion model
+# Load the emotion detection model
 @st.cache_resource
 def load_emotion_model():
     model = load_model('CNN_Model_acc_75.h5')  # Ensure this file is in your Space
     return model

 model = load_emotion_model()
-print("time taken to load model: ", time.time() - start)
+print("Time taken to load model: ", time.time() - start)

 # Emotion labels
 emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']

-# Load known faces (from images in a folder)
+# Load known faces and names
 known_faces = []
 known_names = []
 face_recognizer = cv2.face.LBPHFaceRecognizer_create()

 def load_known_faces():
-    folder_path = "known_faces"  # Place your folder with known faces here
+    folder_path = "known_faces"  # Folder containing known face images
     for image_name in os.listdir(folder_path):
         if image_name.endswith(('.jpg', '.jpeg', '.png')):
             image_path = os.path.join(folder_path, image_name)
             image = cv2.imread(image_path)
             gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
             # Detect face in the image
-            faces = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml').detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
+            faces = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml').detectMultiScale(
+                gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)
+            )

             for (x, y, w, h) in faces:
                 roi_gray = gray[y:y+h, x:x+w]
-                # We only need the face, so we crop it and store it for training
                 known_faces.append(roi_gray)
                 known_names.append(image_name.split('.')[0])  # Assuming file name is the person's name

@@ -53,47 +56,55 @@ def load_known_faces():

 load_known_faces()

-# Face detection using OpenCV
-face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
-img_shape = 48
+# Mediapipe face detection
+mp_face_detection = mp.solutions.face_detection.FaceDetection(min_detection_confidence=0.5)

+# Process a single frame
 def process_frame(frame):
-    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-    faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
-
-    result_text = ""  # Initialize the result text for display
-
-    for (x, y, w, h) in faces:
-        roi_gray = gray_frame[y:y+h, x:x+w]
-        roi_color = frame[y:y+h, x:x+w]
-        face_roi = cv2.resize(roi_color, (img_shape, img_shape))  # Resize to 48x48
-        face_roi = cv2.cvtColor(face_roi, cv2.COLOR_BGR2RGB)  # Convert to RGB (3 channels)
-        face_roi = np.expand_dims(face_roi, axis=0)  # Add batch dimension
-        face_roi = face_roi / 255.0  # Normalize the image
-
-        # Emotion detection
-        predictions = model.predict(face_roi)
-        emotion = emotion_labels[np.argmax(predictions[0])]
-
-        # Face recognition using LBPH
-        label, confidence = face_recognizer.predict(roi_gray)
-        name = "Unknown"
-        if confidence < 100:
-            name = known_names[label]
-
-        # Format the result text as "Name is feeling Emotion"
-        result_text = f"{name} is feeling {emotion}"
-
-        # Draw bounding box and label on the frame
-        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
-        cv2.putText(frame, result_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+    results = mp_face_detection.process(rgb_frame)
+
+    result_text = ""  # Initialize result text
+
+    if results.detections:
+        for detection in results.detections:
+            bboxC = detection.location_data.relative_bounding_box
+            h, w, _ = frame.shape
+            bbox = (
+                int(bboxC.xmin * w), int(bboxC.ymin * h),
+                int(bboxC.width * w), int(bboxC.height * h)
+            )
+            x, y, w, h = bbox
+
+            roi_color = frame[y:y+h, x:x+w]
+            roi_gray = cv2.cvtColor(roi_color, cv2.COLOR_BGR2GRAY)
+            face_roi = cv2.resize(roi_color, (48, 48))
+            face_roi = cv2.cvtColor(face_roi, cv2.COLOR_BGR2RGB)
+            face_roi = np.expand_dims(face_roi, axis=0) / 255.0  # Normalize
+
+            # Emotion detection
+            predictions = model.predict(face_roi)
+            emotion = emotion_labels[np.argmax(predictions[0])]
+
+            # Face recognition
+            name = "Unknown"
+            label, confidence = face_recognizer.predict(roi_gray)
+            if confidence < 100:
+                name = known_names[label]
+
+            # Format result text
+            result_text = f"{name} is feeling {emotion}"
+
+            # Draw bounding box and label
+            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
+            cv2.putText(frame, result_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)

     return frame, result_text

-# Video feed
+# Video feed display
 def video_feed(video_source):
-    frame_placeholder = st.empty()  # This placeholder will be used to replace frames in-place
-    text_placeholder = st.empty()  # This placeholder will display the result text
+    frame_placeholder = st.empty()  # Placeholder for displaying video frames
+    text_placeholder = st.empty()  # Placeholder for displaying result text

     while True:
         ret, frame = video_source.read()
@@ -102,21 +113,17 @@ def video_feed(video_source):

         frame, result_text = process_frame(frame)

-        # Display the frame in the placeholder
+        # Display frame and result text
         frame_placeholder.image(frame, channels="BGR", use_column_width=True)
-
-        # Display the result text in the text placeholder
         text_placeholder.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)

-# Sidebar for video or image upload
-upload_choice = st.sidebar.radio("Choose input source", ["Upload Image", "Upload Video", "Camera"])
+# Sidebar for user input source selection
+upload_choice = st.sidebar.radio("Choose Input Source", ["Upload Image", "Upload Video", "Camera"])

 if upload_choice == "Camera":
-    # Use Streamlit's built-in camera input widget for capturing images from the webcam
     image = st.camera_input("Take a picture")

-    if image is not None:
-        # Convert the image to a numpy array
+    if image:
         frame = np.array(Image.open(image))
         frame, result_text = process_frame(frame)
         st.image(frame, caption='Processed Image', use_column_width=True)
@@ -124,6 +131,7 @@ if upload_choice == "Camera":

 elif upload_choice == "Upload Image":
     uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg", "gif"])
+
     if uploaded_image:
         image = Image.open(uploaded_image)
         frame = np.array(image)
@@ -133,11 +141,11 @@ elif upload_choice == "Upload Image":

 elif upload_choice == "Upload Video":
     uploaded_video = st.file_uploader("Upload Video", type=["mp4", "mov", "avi", "mkv", "webm"])
+
     if uploaded_video:
-        # Temporarily save the video to disk
         with tempfile.NamedTemporaryFile(delete=False) as tfile:
             tfile.write(uploaded_video.read())
             video_source = cv2.VideoCapture(tfile.name)
             video_feed(video_source)

-st.sidebar.write("Emotion Labels: Angry, Fear, Happy, Neutral, Sad, Surprise")
+st.sidebar.write("Emotion Labels: Angry, Fear, Happy, Neutral, Sad, Surprise")
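Note on the recognizer: the context lines of the load_known_faces() hunk elide the tail of the function, where the LBPH model is presumably trained on the collected crops; face_recognizer.predict(roi_gray) in process_frame can only succeed after such a call. A minimal sketch of that elided step, assuming each face's label is simply its index in known_names (the convention the known_names[label] lookup implies):

    import numpy as np

    # Hypothetical training step (not shown in the diff's context lines):
    # LBPH expects grayscale face crops plus int32 labels; using each crop's
    # index in known_names keeps the known_names[label] lookup consistent.
    if known_faces:
        labels = np.arange(len(known_faces), dtype=np.int32)
        face_recognizer.train(known_faces, labels)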
 
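One caveat with the new MediaPipe path, left untouched by this commit: relative_bounding_box can extend slightly past the frame edges, so the scaled x or y may come out negative, and frame[y:y+h, x:x+w] then yields an empty ROI that makes cv2.resize throw. A guard along these lines (a sketch, not part of the commit) would make the detection loop robust:

    # Clamp the detection box to the frame before cropping (hypothetical fix)
    x, y = max(x, 0), max(y, 0)
    w = min(w, frame.shape[1] - x)
    h = min(h, frame.shape[0] - y)
    if w <= 0 or h <= 0:
        continue  # detection fell entirely outside the frame
    roi_color = frame[y:y+h, x:x+w]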