|
import streamlit as st |
|
import cv2 |
|
import os |
|
import numpy as np |
|
from keras.models import load_model |
|
from PIL import Image |
|
import sqlite3 |
|
from huggingface_hub import HfApi |
|
from datetime import datetime |
|
from sklearn.preprocessing import LabelEncoder |
|
|
|
|
|
# Directory where registered face images are stored locally (and mirrored
# to the Hugging Face Space by save_image_to_hugging_face).
KNOWN_FACES_DIR = "known_faces"

# SQLite database file that holds the `students` table.
DATABASE = "students.db"

# Pre-trained Keras CNN for emotion classification; fed 1x48x48x3 input
# (see the resize/reshape in detect_faces_and_emotions).
EMOTION_MODEL_FILE = "CNN_Model_acc_75.h5"

# Index order must match the CNN's output layer — argmax indexes into this.
EMOTION_LABELS = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]

# Hugging Face Space that hosts the app and receives image uploads.
REPO_NAME = "face_and_emotion_detection"

REPO_ID = f"LovnishVerma/{REPO_NAME}"
|
|
|
|
|
# Ensure the local face-image directory exists before any image is saved.
os.makedirs(KNOWN_FACES_DIR, exist_ok=True)


# Hugging Face auth token, read from an env var literally named "upload".
# NOTE(review): unusual env var name — confirm it matches the configured
# secret on the Space.
hf_token = os.getenv("upload")

if not hf_token:

    st.error("Hugging Face token not found. Please set the environment variable.")

    st.stop()  # abort this script run; nothing below works without a token


# Create (or reuse) the target Space; exist_ok=True makes this idempotent
# across reruns.
api = HfApi()

try:

    api.create_repo(repo_id=REPO_ID, repo_type="space", space_sdk="streamlit", token=hf_token, exist_ok=True)

    st.success(f"Repository '{REPO_NAME}' is ready on Hugging Face!")

except Exception as e:

    # Non-fatal: later uploads surface their own errors if the repo is absent.
    st.error(f"Error creating Hugging Face repository: {e}")


# Load the emotion-classification CNN once at startup. The app cannot run
# without it, so a load failure stops the script.
try:

    emotion_model = load_model(EMOTION_MODEL_FILE)

except Exception as e:

    st.error(f"Error loading emotion model: {e}")

    st.stop()
|
|
|
|
|
def initialize_database(db_path=None):
    """Create the SQLite ``students`` table if it does not already exist.

    Args:
        db_path: Optional path to the SQLite database file. Defaults to the
            module-level ``DATABASE`` constant, so the original zero-argument
            call ``initialize_database()`` behaves exactly as before.

    The original version leaked the connection if the DDL raised; the
    try/finally guarantees it is always closed.
    """
    path = DATABASE if db_path is None else db_path
    conn = sqlite3.connect(path)
    try:
        # IF NOT EXISTS makes this safe to run on every Streamlit rerun.
        conn.execute("""
            CREATE TABLE IF NOT EXISTS students (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                name TEXT NOT NULL,
                roll_no TEXT NOT NULL UNIQUE,
                image_path TEXT NOT NULL,
                timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
            )
        """)
        conn.commit()
    finally:
        conn.close()
|
|
|
def save_to_database(name, roll_no, image_path):
    """Insert one student row, reporting the outcome via Streamlit widgets.

    A duplicate roll number violates the UNIQUE constraint and is reported
    as an error instead of propagating.
    """
    connection = sqlite3.connect(DATABASE)
    try:
        insert_sql = """
            INSERT INTO students (name, roll_no, image_path)
            VALUES (?, ?, ?)
        """
        connection.cursor().execute(insert_sql, (name, roll_no, image_path))
        connection.commit()
    except sqlite3.IntegrityError:
        # roll_no carries UNIQUE, so duplicate registrations land here.
        st.error("Roll number already exists!")
    else:
        st.success("Data saved successfully!")
    finally:
        connection.close()
|
|
|
def save_image_to_hugging_face(image, name, roll_no):
    """Save the PIL image under KNOWN_FACES_DIR and push it to the Space.

    Returns:
        The local file path, whether or not the upload succeeded (upload
        failure is reported but non-fatal).
    """
    filename = f"{name}_{roll_no}.jpg"
    local_path = os.path.join(KNOWN_FACES_DIR, filename)
    image.save(local_path)

    try:
        api.upload_file(
            path_or_fileobj=local_path,
            path_in_repo=filename,
            repo_id=REPO_ID,
            repo_type="space",
            token=hf_token,
        )
    except Exception as e:
        st.error(f"Error uploading image to Hugging Face: {e}")
    else:
        # NOTE(review): this f-string has no placeholder — the uploaded
        # file's URL/name appears to have been lost; confirm intended text.
        st.success(f"Image uploaded to Hugging Face: (unknown)")

    return local_path
|
|
|
|
|
# Create the students table before any UI interaction can write to it.
initialize_database()


st.title("Student Registration with Hugging Face Image Upload")


# Registration form inputs.
name = st.text_input("Enter your name")

roll_no = st.text_input("Enter your roll number")


# An explicit key keeps this widget unique: another st.camera_input further
# down also uses the label "Take a picture", and two keyless widgets with
# identical labels make Streamlit raise DuplicateWidgetID when both render.
# (The unused `capture_mode` variable was removed.)
picture = st.camera_input("Take a picture", key="register_camera")
|
|
|
|
|
if st.button("Register"):
    if not name or not roll_no:
        st.error("Please fill in both name and roll number.")
    elif not picture:
        st.error("Please capture an image using the webcam.")
    else:
        try:
            # `picture` is guaranteed truthy here (checked by the elif
            # above), so the original's redundant inner `if picture:`
            # guard was dropped.
            image = Image.open(picture)

            # Persist locally + upload, then record the student row.
            image_path = save_image_to_hugging_face(image, name, roll_no)
            save_to_database(name, roll_no, image_path)
        except Exception as e:
            st.error(f"An error occurred: {e}")
|
|
|
|
|
if st.checkbox("Show registered students"):
    # Fetch every registered student for display.
    connection = sqlite3.connect(DATABASE)
    records = connection.execute(
        "SELECT name, roll_no, image_path, timestamp FROM students"
    ).fetchall()
    connection.close()

    st.write("### Registered Students")
    # Unpacking in the loop header mirrors the original's row unpacking.
    for name, roll_no, image_path, timestamp in records:
        st.write(f"**Name:** {name}, **Roll No:** {roll_no}, **Timestamp:** {timestamp}")
        st.image(image_path, caption=f"{name} ({roll_no})", use_column_width=True)
|
|
|
|
|
# Haar cascade for frontal-face detection, bundled with OpenCV.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')


# LBPH recognizer lives in the opencv-contrib `cv2.face` submodule.
# `cv2.face_LBPHFaceRecognizer_create` was a transitional alias that modern
# OpenCV builds no longer expose; the supported spelling is below.
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
|
|
|
|
|
def train_face_recognizer():
    """Train the LBPH recognizer on every .jpg in KNOWN_FACES_DIR.

    Side effects:
        - Rebinds the module-level ``label_encoder`` (declared ``global``
          here because ``detect_faces_and_emotions`` reads it to invert
          predicted labels; the original kept it local, which made that
          lookup a NameError).
        - Trains the module-level ``face_recognizer`` in place.
    """
    global label_encoder

    faces = []
    labels = []
    label_encoder = LabelEncoder()

    for filename in os.listdir(KNOWN_FACES_DIR):
        if filename.endswith(".jpg"):
            image_path = os.path.join(KNOWN_FACES_DIR, filename)
            image = cv2.imread(image_path)
            if image is None:
                # cv2.imread returns None for unreadable files; skip rather
                # than crash cvtColor on a None input.
                continue
            gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

            faces_detected = face_cascade.detectMultiScale(gray_image, 1.3, 5)
            for (x, y, w, h) in faces_detected:
                face = gray_image[y:y+h, x:x+w]
                faces.append(face)
                # Filenames follow "<name>_<roll>.jpg"; the name prefix is
                # the identity label.
                labels.append(filename.split('_')[0])

    if not faces:
        # LBPH train() raises on an empty sample set — warn instead of crash
        # (e.g. on first run before any student has registered).
        st.warning("No training faces found; face recognition is not trained yet.")
        return

    encoded_labels = label_encoder.fit_transform(labels)
    face_recognizer.train(faces, np.array(encoded_labels))
    st.success("Face recognizer trained successfully!")


train_face_recognizer()
|
|
|
|
|
def detect_faces_and_emotions(image):
    """Detect the first face in an RGB image; return (emotion, identity).

    Args:
        image: RGB ndarray — the caller builds it via PIL/np.array from
            st.camera_input, so channel order is RGB, not OpenCV's BGR.

    Returns:
        ``(emotion_label, recognized_name)`` for the first detected face,
        or ``(None, None)`` when no face is found.
    """
    # RGB2GRAY, not BGR2GRAY: the input is RGB (see docstring), and the two
    # conversions weight the R/B channels differently.
    gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

    faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.3, minNeighbors=5)

    for (x, y, w, h) in faces:
        face = gray_image[y:y+h, x:x+w]
        resized_face = cv2.resize(face, (48, 48))
        # The crop is single-channel; GRAY2RGB replicates it into the three
        # channels the CNN expects. (The original used COLOR_BGR2RGB, which
        # raises on 1-channel input.)
        rgb_face = cv2.cvtColor(resized_face, cv2.COLOR_GRAY2RGB)
        normalized_face = rgb_face / 255.0
        reshaped_face = np.reshape(normalized_face, (1, 48, 48, 3))

        emotion_prediction = emotion_model.predict(reshaped_face)
        emotion_label = np.argmax(emotion_prediction)

        # Requires train_face_recognizer() to have run first so that
        # face_recognizer is trained and label_encoder is populated.
        label, confidence = face_recognizer.predict(face)
        recognized_label = label_encoder.inverse_transform([label])[0]

        # Deliberately returns after the first detected face, matching the
        # original behavior.
        return EMOTION_LABELS[emotion_label], recognized_label

    return None, None
|
|
|
|
|
# NOTE(review): only the "Face Recognition and Emotion Detection" option has
# a handler here; "Register Student" and "View Attendance" select values but
# drive no dedicated UI in this file.
if st.sidebar.selectbox("Menu", ["Register Student", "Face Recognition and Emotion Detection", "View Attendance"]) == "Face Recognition and Emotion Detection":
    st.subheader("Recognize Faces and Detect Emotions")

    st.info("Use the camera input widget to capture an image.")
    # Explicit key: the registration section above also renders a camera
    # widget labeled "Take a picture"; identical keyless labels make
    # Streamlit raise DuplicateWidgetID.
    camera_image = st.camera_input("Take a picture", key="emotion_camera")
    if camera_image:
        img = Image.open(camera_image)
        img_array = np.array(img)  # RGB ndarray, as detect_faces_and_emotions expects

        emotion_label, recognized_label = detect_faces_and_emotions(img_array)

        if emotion_label:
            st.success(f"Emotion Detected: {emotion_label}")
            st.success(f"Face Recognized as: {recognized_label}")
        else:
            st.warning("No face detected.")
|
|