# Source: Hugging Face Space "LovnishVerma/face_and_emotion_detection", app.py
# (revision ebc268d, ~8 kB). File-viewer page chrome ("raw / history / blame")
# converted to this comment so the file parses as Python.
import streamlit as st
import cv2
import os
import numpy as np
from keras.models import load_model
from PIL import Image
import sqlite3
from huggingface_hub import HfApi
from datetime import datetime
from sklearn.preprocessing import LabelEncoder
# Constants
KNOWN_FACES_DIR = "known_faces"  # Directory to save user images
DATABASE = "students.db"  # SQLite database file to store student information
EMOTION_MODEL_FILE = "CNN_Model_acc_75.h5"  # Pre-trained Keras CNN; fed 1x48x48x3 inputs below
EMOTION_LABELS = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]  # index order must match the model's output layer
REPO_NAME = "face_and_emotion_detection"
REPO_ID = f"LovnishVerma/{REPO_NAME}"

# Ensure the directories exist
os.makedirs(KNOWN_FACES_DIR, exist_ok=True)

# Retrieve Hugging Face token from environment variable.
# NOTE(review): the env var is literally named "upload" — confirm this matches
# the secret name configured on the Space.
hf_token = os.getenv("upload")  # Replace with your actual Hugging Face token
if not hf_token:
    st.error("Hugging Face token not found. Please set the environment variable.")
    st.stop()  # halt the Streamlit script; nothing below can work without the token

# Initialize Hugging Face API
api = HfApi()
try:
    # exist_ok=True makes this a no-op when the Space already exists
    api.create_repo(repo_id=REPO_ID, repo_type="space", space_sdk="streamlit", token=hf_token, exist_ok=True)
    st.success(f"Repository '{REPO_NAME}' is ready on Hugging Face!")
except Exception as e:
    st.error(f"Error creating Hugging Face repository: {e}")

# Load the emotion detection model; the app cannot run without it, so stop on failure.
try:
    emotion_model = load_model(EMOTION_MODEL_FILE)
except Exception as e:
    st.error(f"Error loading emotion model: {e}")
    st.stop()
# Database Functions
def initialize_database(db_path=None):
    """Create the ``students`` table in the SQLite database if it doesn't exist.

    Args:
        db_path: Path to the SQLite database file. Defaults to the
            module-level ``DATABASE`` constant when omitted (backward
            compatible with the original zero-argument call).

    The connection is closed even if table creation raises.
    """
    if db_path is None:
        db_path = DATABASE
    conn = sqlite3.connect(db_path)
    try:
        conn.execute("""
            CREATE TABLE IF NOT EXISTS students (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                name TEXT NOT NULL,
                roll_no TEXT NOT NULL UNIQUE,
                image_path TEXT NOT NULL,
                timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
            )
        """)
        conn.commit()
    finally:
        conn.close()
def save_to_database(name, roll_no, image_path):
    """Persist one student's registration record in the SQLite database.

    Shows a Streamlit success message on insert, or an error message when
    the roll number is already registered (UNIQUE constraint violation).
    The connection is always closed, whichever path is taken.
    """
    connection = sqlite3.connect(DATABASE)
    cur = connection.cursor()
    try:
        cur.execute("""
            INSERT INTO students (name, roll_no, image_path)
            VALUES (?, ?, ?)
        """, (name, roll_no, image_path))
        connection.commit()
    except sqlite3.IntegrityError:
        # roll_no carries a UNIQUE constraint — duplicate registration attempt
        st.error("Roll number already exists!")
    else:
        st.success("Data saved successfully!")
    finally:
        connection.close()
def save_image_to_hugging_face(image, name, roll_no):
    """Save the captured image locally and upload it to the Hugging Face Space.

    Args:
        image: PIL Image to persist.
        name: Student name (used in the filename).
        roll_no: Student roll number (used in the filename).

    Returns:
        The local path where the image was saved. The local path is returned
        even if the Hugging Face upload fails (upload is best-effort).
    """
    filename = f"{name}_{roll_no}.jpg"
    local_path = os.path.join(KNOWN_FACES_DIR, filename)
    image.save(local_path)
    try:
        api.upload_file(path_or_fileobj=local_path, path_in_repo=filename, repo_id=REPO_ID, repo_type="space", token=hf_token)
        # Fix: the original message was a garbled f-string with no placeholder
        # ("...: (unknown)"); report where the file actually went.
        st.success(f"Image uploaded to Hugging Face: {REPO_ID}/{filename}")
    except Exception as e:
        st.error(f"Error uploading image to Hugging Face: {e}")
    return local_path
# Initialize the database when the app starts
initialize_database()

# Streamlit user interface (UI)
st.title("Student Registration with Hugging Face Image Upload")

# Input fields for student details
name = st.text_input("Enter your name")
roll_no = st.text_input("Enter your roll number")

# Choose input method for the image (only webcam now)
capture_mode = "Use Webcam"  # Only keep the webcam option now

# Handle webcam capture
picture = st.camera_input("Take a picture")  # Capture image using webcam

# Save data and process image on button click
if st.button("Register"):
    if not name or not roll_no:
        st.error("Please fill in both name and roll number.")
    elif not picture:
        st.error("Please capture an image using the webcam.")
    else:
        try:
            # Open the image based on capture mode
            if picture:
                image = Image.open(picture)
                # Save the image locally and upload it to Hugging Face,
                # then record the student row pointing at the local path
                image_path = save_image_to_hugging_face(image, name, roll_no)
                save_to_database(name, roll_no, image_path)
        except Exception as e:
            st.error(f"An error occurred: {e}")

# Display registered student data
if st.checkbox("Show registered students"):
    conn = sqlite3.connect(DATABASE)
    cursor = conn.cursor()
    cursor.execute("SELECT name, roll_no, image_path, timestamp FROM students")
    rows = cursor.fetchall()
    conn.close()
    st.write("### Registered Students")
    for row in rows:
        # NOTE(review): this rebinds the `name`/`roll_no` widget variables above;
        # harmless here because registration has already run, but fragile.
        name, roll_no, image_path, timestamp = row
        st.write(f"**Name:** {name}, **Roll No:** {roll_no}, **Timestamp:** {timestamp}")
        st.image(image_path, caption=f"{name} ({roll_no})", use_column_width=True)
# Initialize OpenCV's face detector (Haar Cascade bundled with cv2)
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# Initialize LBPH face recognizer.
# Fix: `cv2.face_LBPHFaceRecognizer_create` is not a real attribute; the
# factory lives in the cv2.face submodule (requires opencv-contrib-python).
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
# Function to load and train face recognizer
def train_face_recognizer():
faces = []
labels = []
label_encoder = LabelEncoder()
# Load known faces
for filename in os.listdir(KNOWN_FACES_DIR):
if filename.endswith(".jpg"):
image_path = os.path.join(KNOWN_FACES_DIR, filename)
image = cv2.imread(image_path)
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Detect face(s)
faces_detected = face_cascade.detectMultiScale(gray_image, 1.3, 5)
for (x, y, w, h) in faces_detected:
face = gray_image[y:y+h, x:x+w]
faces.append(face)
labels.append(filename.split('_')[0]) # Assuming name is in the filename
labels = label_encoder.fit_transform(labels)
face_recognizer.train(faces, np.array(labels))
st.success("Face recognizer trained successfully!")
train_face_recognizer()
# Face and Emotion Detection Function
def detect_faces_and_emotions(image):
    """Detect the first face in an image array and classify its emotion.

    Args:
        image: HxWx3 uint8 image array (as produced by np.array(PIL.Image)).

    Returns:
        (emotion_name, recognized_person) for the first detected face, or
        (None, None) when no face is found.
    """
    # Convert the image to grayscale for face detection.
    # NOTE(review): np.array(PIL.Image) is RGB, not BGR; COLOR_BGR2GRAY still
    # works but weights channels slightly differently — confirm intent.
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Detect faces using OpenCV's Haar Cascade
    faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.3, minNeighbors=5)
    # If faces are detected, predict emotion and identity for the first one
    for (x, y, w, h) in faces:
        face = gray_image[y:y + h, x:x + w]
        resized_face = cv2.resize(face, (48, 48))  # CNN expects 48x48 input
        # Fix: the crop is single-channel grayscale, so COLOR_BGR2RGB raises;
        # COLOR_GRAY2RGB replicates the channel to the 3 the model expects.
        rgb_face = cv2.cvtColor(resized_face, cv2.COLOR_GRAY2RGB)
        normalized_face = rgb_face / 255.0
        reshaped_face = np.reshape(normalized_face, (1, 48, 48, 3))
        # Predict the emotion
        emotion_prediction = emotion_model.predict(reshaped_face)
        emotion_label = np.argmax(emotion_prediction)
        # Recognize the face (LBPH returns an encoded label + distance)
        label, confidence = face_recognizer.predict(face)
        recognized_label = label_encoder.inverse_transform([label])[0]
        return EMOTION_LABELS[emotion_label], recognized_label
    return None, None
# UI for Emotion Detection (Only using webcam now)
# NOTE(review): the sidebar offers "View Attendance" but no branch handles it.
if st.sidebar.selectbox("Menu", ["Register Student", "Face Recognition and Emotion Detection", "View Attendance"]) == "Face Recognition and Emotion Detection":
    st.subheader("Recognize Faces and Detect Emotions")
    st.info("Use the camera input widget to capture an image.")
    camera_image = st.camera_input("Take a picture")
    if camera_image:
        img = Image.open(camera_image)
        img_array = np.array(img)  # PIL -> HxWx3 uint8 array for OpenCV
        # Detect emotion and recognize face in the captured image
        emotion_label, recognized_label = detect_faces_and_emotions(img_array)
        if emotion_label:
            st.success(f"Emotion Detected: {emotion_label}")
            st.success(f"Face Recognized as: {recognized_label}")
        else:
            st.warning("No face detected.")