# KeyPoint / app.py: hand keypoint detection with MediaPipe Hands, served
# through a Gradio web interface.
import cv2
import gradio as gr
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_hands = mp.solutions.hands

# Detect hand keypoints on a single static image. Gradio delivers the webcam
# frame as an RGB numpy array.
def fun(img):
    with mp_hands.Hands(static_image_mode=True,
                        max_num_hands=2,
                        min_detection_confidence=0.5) as hands:
        # Convert RGB -> BGR (OpenCV channel order) and flip around the
        # y-axis so the handedness labels are correct for a mirrored webcam.
        image = cv2.flip(img[:, :, ::-1], 1)
        # MediaPipe expects RGB, so convert back before processing.
        results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        if results.multi_handedness is None:
            # No hands found: return a placeholder image, converted to RGB
            # because cv2.imread yields BGR while Gradio displays RGB.
            return cv2.cvtColor(cv2.imread('nohands.png'), cv2.COLOR_BGR2RGB)
        # Print handedness and draw hand landmarks on the image.
        print('Handedness:', results.multi_handedness)
        image_height, image_width, _ = image.shape
        annotated_image = image.copy()
        for hand_landmarks in results.multi_hand_landmarks:
            tip = hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP]
            print(f'Index finger tip coordinates: '
                  f'({tip.x * image_width}, {tip.y * image_height})')
            mp_drawing.draw_landmarks(
                annotated_image,
                hand_landmarks,
                mp_hands.HAND_CONNECTIONS,
                mp_drawing_styles.get_default_hand_landmarks_style(),
                mp_drawing_styles.get_default_hand_connections_style())
        # Undo the flip and return to RGB for display in Gradio.
        res = cv2.flip(annotated_image, 1)
        return res[:, :, ::-1]
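
# Hypothetical helper (a sketch, not part of the original app): convert one
# detected hand's 21 landmarks from MediaPipe's normalized coordinates to
# pixel coordinates, using the same image_width/image_height values as above.
def landmark_pixels(hand_landmarks, image_width, image_height):
    # Each landmark stores x/y normalized to [0, 1]; scale to pixel space.
    return [(lm.x * image_width, lm.y * image_height)
            for lm in hand_landmarks.landmark]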

# Webcam input in, annotated image out.
image = gr.Image(sources=['webcam'])
keypoint = gr.Image()
examples = ['ex1.jpg']
intf = gr.Interface(fn=fun, inputs=image, outputs=keypoint, examples=examples)
intf.launch(inline=False, debug=True)
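
# Optional manual check (a sketch, assuming the 'ex1.jpg' example image is
# present next to this script): call fun() directly, bypassing the UI. Left
# commented out because intf.launch() above blocks until the server stops.
#
# test = cv2.cvtColor(cv2.imread('ex1.jpg'), cv2.COLOR_BGR2RGB)
# annotated = fun(test)
# cv2.imwrite('annotated.png', cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))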