Spaces:
Runtime error
Commit · 9b52443
Parent(s): 7b06bf4
Upload 4 files
- app.py +54 -0
- ex1.jpg +0 -0
- nohands.png +0 -0
- requirements.txt +2 -0
app.py
ADDED
@@ -0,0 +1,54 @@
+import cv2
+import gradio as gr
+import mediapipe as mp
+mp_drawing = mp.solutions.drawing_utils
+mp_drawing_styles = mp.solutions.drawing_styles
+mp_hands = mp.solutions.hands
+
+# For static images:
+def fun(img):
+    print(type(img))
+    with mp_hands.Hands(static_image_mode=True,
+                        max_num_hands=2,
+                        min_detection_confidence=0.5) as hands:
+        # Gradio passes the frame as an RGB array; convert it to BGR and flip it
+        # around the y-axis for correct handedness output.
+        image = cv2.flip(img[:, :, ::-1], 1)
+        # Convert the BGR image back to RGB before processing.
+        results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
+        print(type(results.multi_handedness))
+        if results.multi_handedness is None:
+            # No hands detected: return the fallback image (converted to RGB for Gradio).
+            return cv2.imread('nohands.png')[:, :, ::-1]
+        # Print handedness and draw hand landmarks on the image.
+        print('Handedness:', results.multi_handedness)
+
+        image_height, image_width, _ = image.shape
+        annotated_image = image.copy()
+        for hand_landmarks in results.multi_hand_landmarks:
+            print('hand_landmarks:', hand_landmarks)
+            print(
+                'Index finger tip coordinates: (',
+                f'{hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].x * image_width}, '
+                f'{hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].y * image_height})'
+            )
+            mp_drawing.draw_landmarks(
+                annotated_image,
+                hand_landmarks,
+                mp_hands.HAND_CONNECTIONS,
+                mp_drawing_styles.get_default_hand_landmarks_style(),
+                mp_drawing_styles.get_default_hand_connections_style())
+        # Flip back so the result matches the original orientation; return RGB for Gradio.
+        res = cv2.flip(annotated_image, 1)
+        print(res.shape)
+        return res[:, :, ::-1]
+
+image = gr.inputs.Image(source='webcam')
+keypoint = gr.outputs.Image()
+examples = ['ex1.jpg']
+
+intf = gr.Interface(fn=fun, inputs=image, outputs=keypoint, examples=examples)
+intf.launch(inline=False, debug=True)
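To sanity-check the handler locally without launching the interface, fun can be called directly on the bundled example image. A minimal sketch, assuming fun is already in scope (e.g. the definition above pasted into a REPL before the Interface is created):

import cv2

bgr = cv2.imread('ex1.jpg')                    # OpenCV reads images as BGR
rgb = bgr[:, :, ::-1]                          # fun() expects an RGB array, as Gradio supplies
out = fun(rgb)                                 # RGB array with landmarks drawn, or the no-hands fallback
cv2.imwrite('annotated.jpg', out[:, :, ::-1])  # convert back to BGR for cv2.imwrite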
ex1.jpg
ADDED
nohands.png
ADDED
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+mediapipe
+gradio
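Both dependencies are left unpinned, so a later rebuild of the Space can pull in a Gradio release where the gr.inputs / gr.outputs modules used in app.py no longer exist (they were removed in Gradio 4). A hedged variant with an upper bound; the constraint is an assumption added here, not part of the commit:

mediapipe
gradio<4  # gr.inputs / gr.outputs were removed in Gradio 4; bound assumed, not from the commit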