app.py (new file)
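# Gradio Blocks demo for DeepFace: face verification (are two images the
# same person?) and facial attribute analysis (age, gender, race, emotion)
# from URL, uploaded or webcam images.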
import io
from deepface import DeepFace
import pandas as pd
import gradio as gr
import matplotlib.pyplot as plt
import requests, validators
import torch
import pathlib
from PIL import Image
import os


# allow duplicate OpenMP runtimes (avoids libiomp5 initialization crashes
# when several of the imported libraries bundle their own copy)
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

def get_original_image(url_input):
    """Fetch an image from a URL"""

    if validators.url(url_input):
        return Image.open(requests.get(url_input, stream=True).raw)
    # invalid URL: return None so the gr.Image output is simply cleared
    return None

def face_verification(img1, img2, dist, model, detector):
    """Check whether two face images belong to the same person"""

    try:
        result = DeepFace.verify(img1_path=img1, img2_path=img2, distance_metric=dist,
                                 model_name=model, detector_backend=detector)
    except ValueError:
        # DeepFace raises ValueError when no face is detected;
        # retry without enforcing detection so the app still returns a result
        result = DeepFace.verify(img1_path=img1, img2_path=img2, distance_metric=dist,
                                 model_name=model, detector_backend=detector,
                                 enforce_detection=False)

    return (result['verified'], round(result['distance'], 2), result['threshold'],
            result['model'], result['similarity_metric'])

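# DeepFace.verify returns a dict; the five fields unpacked above map one-to-one
# onto the five gr.Label outputs in the verification tabs (verified flag,
# rounded distance, decision threshold, model name, similarity metric).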
def facial_analysis(img1, detector):
    """Predict age, gender, race and emotion from a face image"""

    try:
        obj = DeepFace.analyze(img_path=img1, actions=['age', 'gender', 'race', 'emotion'],
                               detector_backend=detector)
    except ValueError:
        # retry without enforcing detection if no face was found
        obj = DeepFace.analyze(img_path=img1, actions=['age', 'gender', 'race', 'emotion'],
                               detector_backend=detector, enforce_detection=False)

    return obj['age'], obj['gender'], obj['dominant_race'], obj['dominant_emotion']

def face_recognition(img1, dir_loc, model, dist, detector):
    """Facial recognition given a database or folder location with images"""

    rec = DeepFace.find(img_path=img1, db_path=dir_loc, distance_metric=dist,
                        model_name=model, detector_backend=detector)

    return rec

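# Note: face_recognition is defined but never wired to a button below.
# DeepFace.find expects db_path to point at a folder of reference images and
# returns the closest matches (a pandas DataFrame in the DeepFace versions
# this app targets).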

def set_example_image(example: list) -> dict:
    return gr.Image.update(value=example[0])

def set_example_url(example: list) -> dict:
    return gr.Textbox.update(value=example[0]), gr.Image.update(value=get_original_image(example[0]))

title = """<h1 id="title">DeepFace for Facial Recognition and Analysis</h1>"""

description = """
DeepFace is a lightweight face recognition and facial attribute analysis (age, gender, emotion and race) framework for Python. It is a hybrid face recognition framework wrapping state-of-the-art models: VGG-Face, Google FaceNet, OpenFace, Facebook DeepFace, DeepID, ArcFace, Dlib and SFace.

Experiments show that human beings achieve 97.53% accuracy on facial recognition tasks, and these models have already reached and surpassed that level.
Please see the GitHub repository for more information: [DeepFace](https://github.com/serengil/deepface)

This Space covers facial verification, which determines whether two facial images show the same person, and facial attribute analysis, which predicts age, gender, emotion and race. In my experience the age and race predictions are hit-and-miss: the reported test accuracy for race prediction is 68%,
and the age prediction model has a mean absolute error (MAE) of ±4.65 years.
"""

models = ["VGG-Face", "Facenet", "Facenet512", "OpenFace", "DeepFace", "DeepID", "ArcFace", "Dlib", "SFace"]
metrics = ["cosine", "euclidean", "euclidean_l2"]
backends = ['opencv', 'ssd', 'dlib', 'mtcnn', 'retinaface', 'mediapipe']

# example inputs for the verification (pair) and analysis (single) tabs
urls = [["https://media.vanityfair.com/photos/6036a15657f37ea4415256d2/master/w_2560%2Cc_limit/1225292516",
         "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQSuPVx0JaEW2yp4mT8ZwqFANMT3RhoxcwuuGLhnKgxsA&s"]]

url = [['https://media.vanityfair.com/photos/6036a15657f37ea4415256d2/master/w_2560%2Cc_limit/1225292516']]

twitter_link = """
[](https://twitter.com/nickmuchi)
"""

css = '''
h1#title {
  text-align: center;
}
'''
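# UI layout: one gr.Blocks app with shared model/metric/detector selectors on
# top, then two top-level tabs (Facial Recognition and Facial Analysis), each
# with URL and upload sub-tabs; analysis adds a webcam sub-tab.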
demo = gr.Blocks(css=css)

with demo:
    gr.Markdown(title)
    gr.Markdown(description)
    gr.Markdown(twitter_link)
    model_options = gr.Dropdown(choices=models, label='Facial Recognition Models', value=models[1], show_label=True)
    metric_options = gr.Radio(choices=metrics, label='Distance Metric', value=metrics[0], show_label=True)
    backends_options = gr.Dropdown(choices=backends, label='Face Detector', value=backends[-2], show_label=True)

    with gr.Tabs():
        with gr.TabItem('Facial Recognition'):
            with gr.Tabs():
                with gr.TabItem("URL Images"):

                    with gr.Row():
                        with gr.Column():
                            url_input_1_fr = gr.Textbox(lines=2, label='Image URL 1')
                            url_image_1_fr = gr.Image(label='Image 1', shape=(550, 550), interactive=False)
                            url_input_1_fr.change(get_original_image, url_input_1_fr, url_image_1_fr)
                            url_input_2 = gr.Textbox(lines=2, label='Image URL 2')
                            url_image_2 = gr.Image(label='Image 2', shape=(550, 550), interactive=False)
                            url_input_2.change(get_original_image, url_input_2, url_image_2)

                        with gr.Column():
                            sim_from_url = gr.Label(label='Same Person')
                            dist_from_url = gr.Label(label='Distance')
                            thresh_from_url = gr.Label(label='Threshold to Verify')
                            model_from_url = gr.Label(label='Model Name')
                            metric_from_url = gr.Label(label='Similarity Metric')

                    with gr.Row():
                        example_url = gr.Examples(examples=urls, inputs=[url_input_1_fr, url_input_2])

                    url_but_fr = gr.Button('Verify')

                with gr.TabItem("Upload Images"):

                    with gr.Row():
                        with gr.Column():
                            upload_image_1_fr = gr.Image(label='Image 1', shape=(550, 550), interactive=True)
                            upload_image_2 = gr.Image(label='Image 2', shape=(550, 550), interactive=True)
                        with gr.Column():
                            sim_from_upload = gr.Label(label='Same Person')
                            dist_from_upload = gr.Label(label='Distance')
                            thresh_from_upload = gr.Label(label='Threshold to Verify')
                            model_from_upload = gr.Label(label='Model Name')
                            metric_from_upload = gr.Label(label='Similarity Metric')

                    with gr.Row():
                        example_images = gr.Dataset(components=[upload_image_1_fr],
                                                    samples=[[path.as_posix()] for path in sorted(pathlib.Path('images').rglob('*.j*g'))])

                    up_but_fr = gr.Button('Verify')

        with gr.TabItem('Facial Analysis'):

            with gr.Tabs():
                with gr.TabItem("URL Image"):

                    with gr.Row():
                        with gr.Column():
                            url_input_1_fa = gr.Textbox(lines=2, label='Enter a valid image URL here...')
                            url_image_1_fa = gr.Image(label='Image 1', shape=(550, 550))
                            url_input_1_fa.change(get_original_image, url_input_1_fa, url_image_1_fa)

                        with gr.Column():
                            age_from_url = gr.Label(label='Age')
                            gender_from_url = gr.Label(label='Gender')
                            emo_from_url = gr.Label(label='Emotion')
                            race_from_url = gr.Label(label='Race')

                    with gr.Row():
                        example_url = gr.Examples(examples=url, inputs=[url_input_1_fa])

                    url_but_fa = gr.Button('Analyze')

                with gr.TabItem("Upload Image"):

                    with gr.Row():
                        with gr.Column():
                            upload_image_1_fa = gr.Image(label='Image 1', shape=(550, 550))

                        with gr.Column():
                            age_from_upload = gr.Label(label='Age')
                            gender_from_upload = gr.Label(label='Gender')
                            emo_from_upload = gr.Label(label='Emotion')
                            race_from_upload = gr.Label(label='Race')

                    with gr.Row():
                        example_images = gr.Dataset(components=[upload_image_1_fa],
                                                    samples=[[path.as_posix()] for path in sorted(pathlib.Path('images').rglob('*.j*g'))])

                    up_but_fa = gr.Button('Analyze')

                with gr.TabItem("WebCam Image"):

                    with gr.Row():
                        with gr.Column():
                            web_image = gr.Image(label='WebCam Image', source='webcam', shape=(550, 550), streaming=True)

                        with gr.Column():
                            age_from_web = gr.Label(label='Age')
                            gender_from_web = gr.Label(label='Gender')
                            emo_from_web = gr.Label(label='Emotion')
                            race_from_web = gr.Label(label='Race')

                    web_but_fa = gr.Button('Analyze')

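    # Wire each button to its handler. The shared selectors are passed as
    # extra inputs: verification uses the metric, model and detector, while
    # analysis only needs the detector backend.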
    url_but_fr.click(face_verification,
                     inputs=[url_image_1_fr, url_image_2, metric_options, model_options, backends_options],
                     outputs=[sim_from_url, dist_from_url, thresh_from_url, model_from_url, metric_from_url],
                     queue=True)
    up_but_fr.click(face_verification,
                    inputs=[upload_image_1_fr, upload_image_2, metric_options, model_options, backends_options],
                    outputs=[sim_from_upload, dist_from_upload, thresh_from_upload, model_from_upload, metric_from_upload],
                    queue=True)
    url_but_fa.click(facial_analysis, inputs=[url_image_1_fa, backends_options],
                     outputs=[age_from_url, gender_from_url, race_from_url, emo_from_url], queue=True)
    up_but_fa.click(facial_analysis, inputs=[upload_image_1_fa, backends_options],
                    outputs=[age_from_upload, gender_from_upload, race_from_upload, emo_from_upload])
    web_but_fa.click(facial_analysis, inputs=[web_image, backends_options],
                     outputs=[age_from_web, gender_from_web, race_from_web, emo_from_web])

    gr.Markdown("")

demo.launch(debug=True, enable_queue=True)
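# Hypothetical dependency list, inferred from the imports above (the original
# file does not pin versions): deepface, gradio, validators, requests, pandas,
# matplotlib, torch, Pillow.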