import cv2
import mediapipe as mp
import numpy as np
import gradio as gr

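# MediaPipe solutions for hand and face-mesh landmark detection, plus the
# drawing utilities used to render the detected landmarks.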
mp_hands = mp.solutions.hands
mp_face_mesh = mp.solutions.face_mesh
mp_drawing = mp.solutions.drawing_utils


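# Detect hand and face landmarks on a BGR image and draw them in place.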
def process_image(input_image):
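    # MediaPipe expects RGB input, while OpenCV images are in BGR order.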
    rgb_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)

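    # static_image_mode=True treats every call as an independent image rather than a video stream.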
    with mp_hands.Hands(static_image_mode=True, min_detection_confidence=0.5) as hands, \
         mp_face_mesh.FaceMesh(static_image_mode=True, min_detection_confidence=0.5) as face_mesh:
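        # Run both models on the RGB image.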
        hand_results = hands.process(rgb_image)
        face_results = face_mesh.process(rgb_image)

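        # Draw hand landmarks and their connections onto the original image.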
        if hand_results.multi_hand_landmarks:
            for hand_landmarks in hand_results.multi_hand_landmarks:
                mp_drawing.draw_landmarks(
                    input_image, hand_landmarks, mp_hands.HAND_CONNECTIONS,
                    mp_drawing.DrawingSpec(color=(121, 22, 76), thickness=2, circle_radius=4),
                    mp_drawing.DrawingSpec(color=(250, 44, 250), thickness=2, circle_radius=2)
                )

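        # Draw the face-mesh tessellation for every detected face.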
        if face_results.multi_face_landmarks:
            for face_landmarks in face_results.multi_face_landmarks:
                mp_drawing.draw_landmarks(
                    input_image, face_landmarks, mp_face_mesh.FACEMESH_TESSELATION,
                    mp_drawing.DrawingSpec(color=(80, 110, 10), thickness=1, circle_radius=1),
                    mp_drawing.DrawingSpec(color=(80, 255, 121), thickness=1, circle_radius=1)
                )

    return input_image


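# Adapter between Gradio and process_image: handles the color-space conversions.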
def gradio_interface(image):
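    # Gradio supplies a PIL image in RGB; convert it to a NumPy BGR array for OpenCV.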
    image = np.array(image)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    processed_image = process_image(image)

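    # Convert back to RGB so Gradio displays the annotated image with correct colors.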
    processed_image = cv2.cvtColor(processed_image, cv2.COLOR_BGR2RGB)
    return processed_image


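# Build the Gradio interface: one image input (upload or webcam) and one annotated image output.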
iface = gr.Interface(
    fn=gradio_interface,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="numpy"),
    title="Face and Hand Landmarks Detection",
    description="Upload an image or take a photo to detect face and hand landmarks using MediaPipe and OpenCV."
)

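# Launch the app when the script is run directly.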
if __name__ == "__main__":
    iface.launch()