Spaces:
Build error
clicking on face
app.py
CHANGED
@@ -5,6 +5,7 @@ import PIL
 from PIL import Image, ImageDraw, ImageFont
 import time
 
+
 dbackends = [
 ['Haar Cascade (OpenCV)','opencv'],
 ['Single Shot MultiBox Detector (OpenCV)','ssd'],
@@ -12,44 +13,59 @@ dbackends = [
 ['RetinaFace','retinaface'],
 ['You Only Look Once v8','yolov8'],
 ['YuNet','yunet'],
-['Multi-task Cascade Convolutional Neural Network (TensorFlow) ','mtcnn'],
+# ['Multi-task Cascade Convolutional Neural Network (TensorFlow) ','mtcnn'],
 ['Fast Multi-task Cascade Convolutional Neural Network (PyTorch)','fastmtcnn']
 ]
 
-
-
-
-
-
-
-
-
-
-
-faceannotations = []
-for face_obj in face_objs:
-    face_coordinates = (face_obj["facial_area"]["x"],face_obj["facial_area"]["y"], (face_obj["facial_area"]["x"] + face_obj["facial_area"]["w"]),(face_obj["facial_area"]["y"] + face_obj["facial_area"]["h"]))
-    face_confidence = "{:.0%}".format(face_obj["confidence"])
-    face_result=[face_coordinates,face_confidence]
-    faceannotations.append(face_result)
-jsontext=faceannotations
-run_time = str(round((time.time() - start_time),2))
-results = gr.AnnotatedImage(
-    label= "Detected " + str(numberoffaces) + " faces via " + dbackend + ' in ' + run_time + ' seconds.',
-    value=(imgfile, faceannotations)
-)
+
+with gr.Blocks() as demo:
+
+
+    annotated_image = gr.AnnotatedImage()
+
+    jsontext = gr.Text(label= "deepface extract_faces results")
+    selected_face = gr.Textbox(label="Selected Face")
+
+
 
-
-
-
-dbackendchoice = gr.Radio(choices=dbackends,label='Detector Backend:',value='retinaface')
-demo = gr.Interface(
-    allow_flagging = "never",
-    fn=findFaces,
-    inputs=[gr.Image(value="8428_26_SM.jpg"), dbackendchoice],
-    outputs=[annotated_image,jsontext],
+    def findFaces(imgfile,dbackend):
+        start_time = time.time()
+        print(start_time)
 
-)
+        face_objs = DeepFace.extract_faces(img_path = imgfile, enforce_detection = False, detector_backend = dbackend)
+
+        numberoffaces = len(face_objs)
+        jsontext = ''
+        global faceannotations
+        faceannotations = []
+        for face_obj in face_objs:
+            face_coordinates = (face_obj["facial_area"]["x"],face_obj["facial_area"]["y"], (face_obj["facial_area"]["x"] + face_obj["facial_area"]["w"]),(face_obj["facial_area"]["y"] + face_obj["facial_area"]["h"]))
+            face_confidence = "{:.0%}".format(face_obj["confidence"])
+            face_result=[face_coordinates,face_confidence]
+            faceannotations.append(face_result)
+
+        jsontext=faceannotations
+        #jsontext=face_objs
+        run_time = str(round((time.time() - start_time),2))
+        results = gr.AnnotatedImage(
+            label= "Detected " + str(numberoffaces) + " faces via " + dbackend + ' in ' + run_time + ' seconds.',
+            value=(imgfile, faceannotations)
+        )
+
+        print(run_time)
+        return(results,jsontext,numberoffaces,run_time)
+
+    dbackendchoice = gr.Radio(choices=dbackends,label='Detector Backend:',value='retinaface')
+    gr.Interface(
+        allow_flagging = "never",
+        fn=findFaces,
+        inputs=[gr.Image(value="8428_26_SM.jpg"), dbackendchoice],
+        outputs=[annotated_image,jsontext,selected_face],
+    )
+    def select_section(evt: gr.SelectData):
+        return faceannotations[evt.index]
+
+    annotated_image.select(select_section, None, selected_face)
 
 demo.launch(show_error=True)
 
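
The change this commit is named for is the select wiring at the bottom of the file: clicking a box in the gr.AnnotatedImage fires its .select event, and the gr.SelectData payload's index tells the handler which annotation was clicked. Below is a minimal, self-contained sketch of that pattern with placeholder boxes instead of DeepFace detections (an illustration, not the committed code):

# Minimal sketch of the click-to-select pattern, assuming a recent Gradio install
# and no DeepFace; the image and boxes below are stand-ins for detector output.
import numpy as np
import gradio as gr

with gr.Blocks() as demo:
    img = np.zeros((100, 160, 3), dtype=np.uint8)       # placeholder image
    boxes = [((10, 10, 60, 60), "93%"),                 # ((x1, y1, x2, y2), label)
             ((80, 20, 140, 90), "88%")]

    annotated = gr.AnnotatedImage(value=(img, boxes), label="Detected faces")
    selected = gr.Textbox(label="Selected Face")

    def select_section(evt: gr.SelectData):
        # evt.index is the position of the clicked annotation in the list above
        return boxes[evt.index]

    annotated.select(select_section, None, selected)

demo.launch()

In the commit itself, select_section reads the module-level faceannotations list that findFaces fills via global; a closure over local data (as above) or a gr.State component is the more usual way to carry values between events in Gradio, but a global also works for a single-user demo.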
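Separately from whatever caused the build error, note that the new findFaces returns four values (results, jsontext, numberoffaces, run_time) while the gr.Interface call lists only three output components (annotated_image, jsontext, selected_face). Gradio expects one return value per output component, so the extra values would be expected to raise an error when the function runs; either the face count and run time need components of their own, or the return statement needs to drop them. A hypothetical sketch of the one-value-per-component rule (an assumption about intent, not the committed fix):

# Hypothetical sketch (not the committed code): fn returns exactly one value per
# output component declared below, so Gradio can route every value to a component.
import gradio as gr

def report(numberoffaces):
    run_time = 0.42                      # placeholder timing, not a real measurement
    return f"{int(numberoffaces)} faces", f"{run_time} seconds"

demo = gr.Interface(
    fn=report,
    inputs=gr.Number(value=3, label="numberoffaces"),
    outputs=[gr.Textbox(label="Faces"), gr.Textbox(label="Run time")],
)

demo.launch()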