LovnishVerma committed · verified
Commit c8ff68c · 1 Parent(s): da91735

Update app.py

Files changed (1):
  1. app.py +29 -31
app.py CHANGED
@@ -6,66 +6,68 @@ import os
 from keras.models import load_model
 from PIL import Image
 import mediapipe as mp
+import requests
 import tempfile
 
-# Larger title
-st.markdown("<h1 style='text-align: center;'>Emotion Detection with Face Recognition</h1>", unsafe_allow_html=True)
+# API endpoint for sending data
+API_ENDPOINT = "https://huggingface.glitch.me/update"
 
-# Smaller subtitle
+# Title
+st.markdown("<h1 style='text-align: center;'>Emotion Detection with Face Recognition</h1>", unsafe_allow_html=True)
 st.markdown("<h3 style='text-align: center;'>angry, fear, happy, neutral, sad, surprise</h3>", unsafe_allow_html=True)
 
-# Start time for measuring performance
-start = time.time()
-
-# Load the emotion detection model
+# Load emotion detection model
 @st.cache_resource
 def load_emotion_model():
-    model = load_model('CNN_Model_acc_75.h5')  # Ensure this file is in your Space
+    model = load_model('CNN_Model_acc_75.h5')
     return model
 
 model = load_emotion_model()
-print("Time taken to load model: ", time.time() - start)
-
-# Emotion labels
 emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']
 
-# Load known faces and names
+# Load known faces
 known_faces = []
 known_names = []
 face_recognizer = cv2.face.LBPHFaceRecognizer_create()
 
 def load_known_faces():
-    folder_path = "known_faces"  # Folder containing known face images
+    folder_path = "known_faces"
     for image_name in os.listdir(folder_path):
         if image_name.endswith(('.jpg', '.jpeg', '.png')):
            image_path = os.path.join(folder_path, image_name)
            image = cv2.imread(image_path)
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-           # Detect face in the image
            faces = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml').detectMultiScale(
                gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)
            )
-
            for (x, y, w, h) in faces:
                roi_gray = gray[y:y+h, x:x+w]
                known_faces.append(roi_gray)
-               known_names.append(image_name.split('.')[0])  # Assuming file name is the person's name
-
-    # Train the recognizer with the known faces
+               known_names.append(image_name.split('.')[0])
    face_recognizer.train(known_faces, np.array([i for i in range(len(known_faces))]))
 
 load_known_faces()
 
-# Mediapipe face detection
 mp_face_detection = mp.solutions.face_detection.FaceDetection(min_detection_confidence=0.5)
 
+# Function to send data to API
+def send_data_to_api(name, emotion):
+    try:
+        data = {'name': name, 'emotion': emotion}
+        response = requests.post(API_ENDPOINT, data=data)
+        if response.status_code == 200:
+            st.success(f"Data sent successfully: {response.json()}")
+        else:
+            st.error(f"Failed to send data: {response.text}")
+    except Exception as e:
+        st.error(f"Error sending data to API: {str(e)}")
+
 # Process a single frame
 def process_frame(frame):
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    results = mp_face_detection.process(rgb_frame)
 
-   result_text = ""  # Initialize result text
-
+   result_text = ""
    if results.detections:
        for detection in results.detections:
            bboxC = detection.location_data.relative_bounding_box
@@ -80,22 +82,20 @@ def process_frame(frame):
            roi_gray = cv2.cvtColor(roi_color, cv2.COLOR_BGR2GRAY)
            face_roi = cv2.resize(roi_color, (48, 48))
            face_roi = cv2.cvtColor(face_roi, cv2.COLOR_BGR2RGB)
-           face_roi = np.expand_dims(face_roi, axis=0) / 255.0  # Normalize
+           face_roi = np.expand_dims(face_roi, axis=0) / 255.0
 
-           # Emotion detection
            predictions = model.predict(face_roi)
            emotion = emotion_labels[np.argmax(predictions[0])]
 
-           # Face recognition
            name = "Unknown"
            label, confidence = face_recognizer.predict(roi_gray)
            if confidence < 100:
                name = known_names[label]
 
-           # Format result text
            result_text = f"{name} is feeling {emotion}"
+           if name != "Unknown":
+               send_data_to_api(name, emotion)
 
-           # Draw bounding box and label
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.putText(frame, result_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
 
@@ -103,8 +103,8 @@ def process_frame(frame):
 
 # Video feed display
 def video_feed(video_source):
-   frame_placeholder = st.empty()  # Placeholder for displaying video frames
-   text_placeholder = st.empty()  # Placeholder for displaying result text
+   frame_placeholder = st.empty()
+   text_placeholder = st.empty()
 
    while True:
        ret, frame = video_source.read()
@@ -112,8 +112,6 @@ def video_feed(video_source):
            break
 
        frame, result_text = process_frame(frame)
-
-       # Display frame and result text
        frame_placeholder.image(frame, channels="BGR", use_column_width=True)
        text_placeholder.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)
 
@@ -148,4 +146,4 @@ elif upload_choice == "Upload Video":
    video_source = cv2.VideoCapture(tfile.name)
    video_feed(video_source)
 
-st.sidebar.write("Emotion Labels: Angry, Fear, Happy, Neutral, Sad, Surprise")
+st.sidebar.write("Emotion Labels: Angry, Fear, Happy, Neutral, Sad, Surprise")
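
For context on the new send_data_to_api call: requests.post(API_ENDPOINT, data=data) sends name and emotion as form-encoded fields, and the success branch calls response.json(), so the endpoint is expected to answer a 200 with a JSON body. The Glitch service behind https://huggingface.glitch.me/update is not part of this repo; below is a minimal sketch of a compatible receiver, assuming Flask on the server side (the route handler and response shape are illustrative assumptions, not the actual service).

# Hypothetical receiver for send_data_to_api; the real Glitch service may differ.
from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route("/update", methods=["POST"])
def update():
    # requests.post(..., data=...) sends form-encoded fields,
    # so they arrive in request.form, not request.json.
    name = request.form.get("name", "Unknown")
    emotion = request.form.get("emotion", "")
    # The Streamlit client calls response.json() on a 200 reply,
    # so return a JSON body.
    return jsonify({"status": "ok", "name": name, "emotion": emotion}), 200

if __name__ == "__main__":
    app.run(port=3000)

Note that process_frame now issues this POST synchronously for every recognized face on every frame, so each detection adds a network round-trip to the video loop.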