import streamlit as st
import cv2
import numpy as np
import time
import os
from keras.models import load_model
from PIL import Image
import tempfile

# Larger title
st.markdown("<h1 style='text-align: center;'>Emotion Detection with Face Recognition</h1>", unsafe_allow_html=True)

# Smaller subtitle
st.markdown("<h3 style='text-align: center;'>angry, fear, happy, neutral, sad, surprise</h3>", unsafe_allow_html=True)

start = time.time()

# Load the emotion model
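# (st.cache_resource caches the loaded model across Streamlit reruns, so the
# model file is read from disk only once per process)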
@st.cache_resource
def load_emotion_model():
    model = load_model('CNN_Model_acc_75.h5')  # Ensure this file is in your Space
    return model

model = load_emotion_model()
print("time taken to load model: ", time.time() - start)

# Emotion labels (order must match the class order the CNN was trained with)
emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']

# Load known faces (from images in a folder)
known_faces = []
known_names = []
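# NOTE: cv2.face (the LBPH recognizer below) ships in the opencv-contrib-python
# package; the plain opencv-python wheel does not include the cv2.face module.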
face_recognizer = cv2.face.LBPHFaceRecognizer_create()

def load_known_faces():
    """Build the LBPH training set from known_faces/<name>.jpg;
    the file name (without extension) becomes the person's name."""
    folder_path = "known_faces"  # Place your folder with known faces here
    if not os.path.isdir(folder_path):
        return  # No known faces available; recognition will label everyone "Unknown"

    detector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    for image_name in os.listdir(folder_path):
        if image_name.lower().endswith(('.jpg', '.jpeg', '.png')):
            image_path = os.path.join(folder_path, image_name)
            image = cv2.imread(image_path)
            if image is None:
                continue  # Skip unreadable files
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            # Detect face(s) in the image
            faces = detector.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

            for (x, y, w, h) in faces:
                # We only need the face, so crop it and store it for training
                known_faces.append(gray[y:y+h, x:x+w])
                known_names.append(os.path.splitext(image_name)[0])  # File name is the person's name

    # Train the recognizer; label i maps back to known_names[i]
    if known_faces:
        face_recognizer.train(known_faces, np.arange(len(known_faces)))

load_known_faces()

# Face detection using OpenCV
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
img_shape = 48  # Input size expected by the emotion CNN (faces are resized to 48x48)

def process_frame(frame):
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

    result_text = ""  # Initialize the result text for display

    for (x, y, w, h) in faces:
        roi_gray = gray_frame[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
        face_roi = cv2.resize(roi_color, (img_shape, img_shape))  # Resize to 48x48
        face_roi = cv2.cvtColor(face_roi, cv2.COLOR_BGR2RGB)  # Convert to RGB (3 channels)
        face_roi = np.expand_dims(face_roi, axis=0)  # Add batch dimension
        face_roi = face_roi / 255.0  # Normalize the image

        # Emotion detection
        predictions = model.predict(face_roi)
        emotion = emotion_labels[np.argmax(predictions[0])]

        # Face recognition using LBPH (confidence is a distance; lower means a closer match)
        name = "Unknown"
        if known_names:  # The recognizer is only trained when known faces were loaded
            label, confidence = face_recognizer.predict(roi_gray)
            if confidence < 100:  # Arbitrary cut-off; tune for your data
                name = known_names[label]

        # Format the result text as "Name is feeling Emotion"
        result_text = f"{name} is feeling {emotion}"

        # Draw bounding box and label on the frame
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame, result_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

    return frame, result_text
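
# NOTE: model.predict runs once per detected face per frame; on long videos this
# dominates runtime, and sampling every Nth frame is a simple way to speed it up.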

# Video feed
def video_feed(video_source):
    frame_placeholder = st.empty()  # This placeholder will be used to replace frames in-place
    text_placeholder = st.empty()  # This placeholder will display the result text

    while True:
        ret, frame = video_source.read()
        if not ret:
            break

        frame, result_text = process_frame(frame)

        # Display the frame in the placeholder
        frame_placeholder.image(frame, channels="BGR", use_column_width=True)

        # Display the result text in the text placeholder
        text_placeholder.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)

    video_source.release()  # Free the capture handle once the video ends
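
# NOTE: this frame loop blocks the Streamlit script until the video ends; for a
# live webcam stream, a component such as streamlit-webrtc is the usual approach.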

# Sidebar for video or image upload
upload_choice = st.sidebar.radio("Choose input source", ["Upload Image", "Upload Video", "Camera"])

if upload_choice == "Camera":
    # Use Streamlit's built-in camera input widget for capturing images from the webcam
    image = st.camera_input("Take a picture")

    if image is not None:
        # PIL returns RGB; convert to BGR so process_frame's OpenCV colour handling is correct
        frame = cv2.cvtColor(np.array(Image.open(image).convert("RGB")), cv2.COLOR_RGB2BGR)
        frame, result_text = process_frame(frame)
        st.image(frame, channels="BGR", caption='Processed Image', use_column_width=True)
        st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)

elif upload_choice == "Upload Image":
    uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg", "gif"])
    if uploaded_image:
        # Force 3-channel RGB (drops any alpha/palette), then convert to BGR for OpenCV
        image = Image.open(uploaded_image).convert("RGB")
        frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
        frame, result_text = process_frame(frame)
        st.image(frame, channels="BGR", caption='Processed Image', use_column_width=True)
        st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)

elif upload_choice == "Upload Video":
    uploaded_video = st.file_uploader("Upload Video", type=["mp4", "mov", "avi", "mkv", "webm"])
    if uploaded_video:
        # Write the upload to a temp file so OpenCV can open it by path, and close
        # the file before reading it (keeping it open while reading fails on Windows)
        suffix = os.path.splitext(uploaded_video.name)[1]
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tfile:
            tfile.write(uploaded_video.read())
            temp_path = tfile.name
        video_source = cv2.VideoCapture(temp_path)
        video_feed(video_source)
        os.remove(temp_path)  # Clean up the temporary copy

st.sidebar.write("Emotion Labels: Angry, Fear, Happy, Neutral, Sad, Surprise")
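
# Assumed dependencies (a sketch of a requirements.txt for this Space):
#   streamlit
#   opencv-contrib-python  # provides cv2.face for LBPH
#   tensorflow / keras     # for keras.models.load_model
#   numpy
#   Pillow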