Anushree1 committed on
Commit
dff63c2
·
verified ·
1 Parent(s): 96568cc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +55 -68
app.py CHANGED
@@ -1,82 +1,69 @@
1
- import gradio as gr
2
  import cv2
3
- import face_recognition
4
  import numpy as np
5
- import pickle
6
- import os
7
-
8
- # Directory where face images are stored
9
- dataset_dir = "face_images"
10
-
11
- # Check if face encodings file exists, if not create it
12
- encodings_file = "face_encodings.pkl"
13
- if not os.path.exists(encodings_file):
14
- def create_encodings():
15
- known_face_encodings = []
16
- known_face_names = []
17
-
18
- for person_name in os.listdir(dataset_dir):
19
- person_folder = os.path.join(dataset_dir, person_name)
20
- if not os.path.isdir(person_folder):
21
- continue
22
-
23
- for filename in os.listdir(person_folder):
24
- image_path = os.path.join(person_folder, filename)
25
- image = face_recognition.load_image_file(image_path)
26
- face_encodings = face_recognition.face_encodings(image)
27
-
28
- if face_encodings:
29
- known_face_encodings.append(face_encodings[0])
30
- known_face_names.append(person_name)
31
 
32
- encodings_data = {"encodings": known_face_encodings, "names": known_face_names}
33
- with open(encodings_file, "wb") as file:
34
- pickle.dump(encodings_data, file)
35
- print("Encodings saved to face_encodings.pkl")
36
 
37
- create_encodings()
 
 
 
38
 
39
- # Load face encodings
40
- with open(encodings_file, "rb") as file:
41
- data = pickle.load(file)
42
- known_face_encodings = data["encodings"]
43
- known_face_names = data["names"]
 
 
 
44
 
45
- def capture_and_recognize():
46
- cap = cv2.VideoCapture(0)
47
- ret, frame = cap.read()
48
- if not ret:
49
- cap.release()
50
- return None, "Failed to capture image from webcam."
 
 
51
 
52
- rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
53
- face_locations = face_recognition.face_locations(rgb_image)
54
- face_encodings = face_recognition.face_encodings(rgb_image, face_locations)
 
 
 
 
 
 
 
55
 
56
- names = []
57
- for face_encoding in face_encodings:
58
- matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
59
- name = "Unknown"
60
- face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
61
- best_match_index = np.argmin(face_distances)
62
- if matches[best_match_index]:
63
- name = known_face_names[best_match_index]
64
- names.append(name)
65
 
66
- for (top, right, bottom, left), name in zip(face_locations, names):
67
- cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
68
- cv2.putText(frame, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
69
 
70
- cap.release()
71
- return frame, "Recognized Names: " + ", ".join(names)
 
72
 
 
73
  iface = gr.Interface(
74
- fn=capture_and_recognize,
75
- inputs=[],
76
- outputs=["image", "text"],
77
- live=True,
78
- title="Facial Recognition Attendance System",
79
- description="Capture your face for attendance."
80
  )
81
 
82
- iface.launch()
 
 
 
 
1
  import cv2
2
+ import mediapipe as mp
3
  import numpy as np
4
+ import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
# Initialize MediaPipe solution modules once at import time.
# `hands` and `face_mesh` expose the detector model classes used below;
# `drawing_utils` renders detected landmarks onto numpy images.
mp_hands = mp.solutions.hands
mp_face_mesh = mp.solutions.face_mesh
mp_drawing = mp.solutions.drawing_utils
10
 
11
# Detect hand and face landmarks in a BGR image and draw them onto it.
def process_image(input_image):
    """Annotate *input_image* (BGR numpy array) with hand and face landmarks.

    The image is modified in place and also returned for convenience.
    """
    # MediaPipe models consume RGB data, while OpenCV images are BGR.
    rgb = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)

    # static_image_mode=True: treat each call as an independent photo
    # rather than a video stream.
    hands_model = mp_hands.Hands(
        static_image_mode=True, min_detection_confidence=0.5
    )
    mesh_model = mp_face_mesh.FaceMesh(
        static_image_mode=True, min_detection_confidence=0.5
    )
    with hands_model as hands, mesh_model as face_mesh:
        hand_results = hands.process(rgb)
        face_results = face_mesh.process(rgb)

    # Overlay any detected hand skeletons (result attr is None when absent).
    for hand_landmarks in hand_results.multi_hand_landmarks or []:
        mp_drawing.draw_landmarks(
            input_image, hand_landmarks, mp_hands.HAND_CONNECTIONS,
            mp_drawing.DrawingSpec(color=(121, 22, 76), thickness=2, circle_radius=4),
            mp_drawing.DrawingSpec(color=(250, 44, 250), thickness=2, circle_radius=2),
        )

    # Overlay the face tessellation for any detected faces.
    for face_landmarks in face_results.multi_face_landmarks or []:
        mp_drawing.draw_landmarks(
            input_image, face_landmarks, mp_face_mesh.FACEMESH_TESSELATION,
            mp_drawing.DrawingSpec(color=(80, 110, 10), thickness=1, circle_radius=1),
            mp_drawing.DrawingSpec(color=(80, 256, 121), thickness=1, circle_radius=1),
        )

    return input_image
44
 
45
# Gradio callback: adapts between Gradio's PIL/RGB world and OpenCV's BGR world.
def gradio_interface(image):
    """Run landmark detection on an uploaded image and return the result.

    Parameters
    ----------
    image : PIL.Image.Image or None
        Image from the Gradio input component; ``None`` when the user
        submits without providing an image.

    Returns
    -------
    numpy.ndarray or None
        RGB image with landmarks drawn, or ``None`` for empty input.
    """
    # Gradio passes None on an empty submission; np.array(None) would
    # otherwise produce a 0-d object array and crash cvtColor downstream.
    if image is None:
        return None

    frame = np.array(image)
    # Drop the alpha channel of RGBA uploads — COLOR_RGB2BGR and the
    # MediaPipe models expect exactly 3 channels.
    if frame.ndim == 3 and frame.shape[2] == 4:
        frame = frame[:, :, :3]

    # process_image expects BGR (OpenCV convention).
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    annotated = process_image(frame)

    # Convert back to RGB for display in Gradio.
    return cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)
57
 
58
# Define the Gradio app.
# NOTE(review): the `tool=` argument of gr.Image was removed in Gradio 4.x
# and raises TypeError there; passing only `type` works on both 3.x and 4.x.
iface = gr.Interface(
    fn=gradio_interface,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="numpy"),
    title="Face and Hand Landmarks Detection",
    description="Upload an image or take a photo to detect face and hand landmarks using Mediapipe and OpenCV."
)
66
 
67
# Launch the Gradio app only when this file is executed directly
# (not when imported by a hosting platform or another module).
if __name__ == "__main__":
    iface.launch()