Upload 3 files
- Logs/detectnameanddistance.py +46 -45
- main.py +3 -14
- requirements.txt +0 -2
Logs/detectnameanddistance.py
CHANGED
@@ -1,51 +1,52 @@
-import cv2
+from PIL import Image
 import numpy as np
 def render(Session,input_name,output_name,FacesEmbedding,results,FaceImage,persons):
-    res = results.face_landmarks[0]
-
-    x_=int(res[145].x*FaceImage.shape[1])
-    y_=int(res[145].y*FaceImage.shape[0])
-    x2_=int(res[374].x*FaceImage.shape[1])
-    y2_=int(res[374].y*FaceImage.shape[0])
-    w=np.sqrt((x_-x2_)**2+(y_-y2_)**2)
-    W=6.3
-    f = 840
-    d = (W * f) / w
-    x=int(res[356].x*FaceImage.shape[1])
-    y=int(res[152].y*FaceImage.shape[0])
-    x2=int(res[162].x*FaceImage.shape[1])
-    y2=int(res[338].y*FaceImage.shape[0])
-    if x<FaceImage.shape[1]-10:
-        x+=10
-    if y>FaceImage.shape[0]-10:
-        y+=10
-    if x2>10:
-        x2-=10
-    if y2>10:
-        y2-=10
-
-
-    modelimg=FaceImage[y2:y,x2:x]
-
-
-    if modelimg.size<9:
-        continue
-    modelimg=cv2.resize(modelimg,(224,224)).astype(np.float32)
-    modelimg=modelimg/255
-
-    distances=[]
-    if d>0:
-        for index,name in enumerate(persons):
-            output=np.squeeze(Session.run([output_name],{f"{input_name}":np.expand_dims(modelimg,axis=0).astype(np.float16)})[0])
-            personimpeding=FacesEmbedding[name].values
-            distance=np.sum(np.power(output-personimpeding,2))
-            distances.append(distance)
-        name=persons[np.argmin(distances)]
-        distance=distances[np.argmin(distances)]
-        if distance <0.3:
-            return x_,y_,x2_,y2,name,d
-
-        else:
-            return x_,y_,x2_,y2,"UnKnow",d
-
+    res = results.face_landmarks[0]
+    x_=int(res[145].x*FaceImage.shape[1])
+    y_=int(res[145].y*FaceImage.shape[0])
+    x2_=int(res[374].x*FaceImage.shape[1])
+    y2_=int(res[374].y*FaceImage.shape[0])
+    w=np.sqrt((x_-x2_)**2+(y_-y2_)**2)
+    W=6.3
+    f = 840
+    d = (W * f) / w
+    x=int(res[356].x*FaceImage.shape[1])
+    y=int(res[152].y*FaceImage.shape[0])
+    x2=int(res[162].x*FaceImage.shape[1])
+    y2=int(res[338].y*FaceImage.shape[0])
+    if x<FaceImage.shape[1]-10:
+        x+=10
+    if y>FaceImage.shape[0]-10:
+        y+=10
+    if x2>10:
+        x2-=10
+    if y2>10:
+        y2-=10
+
+
+    modelimg=FaceImage[np.abs(y2):np.abs(y),np.abs(x2):np.abs(x)]
+
+
+    if modelimg.size<9:
+        return x_,y_,x2_,y2,"Image Size is Zero ",d
+    modelimg=np.array(Image.fromarray(modelimg.astype("uint8")).resize((224,224)))
+
+    modelimg=modelimg/255
+
+    distances=[]
+
+    for index,name in enumerate(persons):
+        output=np.squeeze(Session.run([output_name],{f"{input_name}":np.expand_dims(modelimg,axis=0).astype(np.float16)})[0])
+        personimpeding=FacesEmbedding[name].values
+        distance=np.sum(np.power(output-personimpeding,2))
+        distances.append(distance)
+    name=persons[np.argmin(distances)]
+    distance=distances[np.argmin(distances)]
+    if distance <0.3:
+
+        return x_,y_,x2_,y2,name,d
+
+    else:
+        return x_,y_,x2_,y2,"UnKnow",d
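In short, the new render() drops OpenCV in favour of PIL for the crop resize, wraps the crop indices in np.abs so negative coordinates cannot silently flip the slice, and replaces the old continue (invalid outside a loop) with an explicit "Image Size is Zero " return. The sketch below isolates the two ideas the function combines, the pinhole-camera distance estimate from the eye-to-eye pixel span and the nearest-embedding name lookup; it is a minimal illustration, not the Space's code. The helper names are invented here, W=6.3 and f=840 are the width and focal-length constants hard-coded in the diff, and FacesEmbedding is assumed to be a pandas DataFrame with one column of embedding values per enrolled person, as FacesEmbedding[name].values suggests.

import numpy as np

def estimate_distance(p1, p2, W=6.3, f=840):
    # Pinhole-camera model: distance = real_width * focal_length / pixel_width.
    # W (assumed real eye-to-eye width) and f (assumed focal length in pixels)
    # are the constants hard-coded in render().
    w = np.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
    return (W * f) / w

def closest_person(embedding, FacesEmbedding, persons, threshold=0.3):
    # Squared-L2 distance between the model output and each stored embedding,
    # with the same 0.3 cut-off and "UnKnow" fallback as the diff.
    distances = [np.sum((embedding - FacesEmbedding[name].values) ** 2) for name in persons]
    best = int(np.argmin(distances))
    if distances[best] < threshold:
        return persons[best], distances[best]
    return "UnKnow", distances[best]

# Made-up numbers: eye landmarks 84 px apart -> (6.3 * 840) / 84 = 63.0
print(estimate_distance((400, 300), (484, 300)))

Note that the ONNX Session.run call in render() does not depend on the loop variable, so a sketch like this computes the embedding once and only loops over the stored per-person vectors.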
main.py
CHANGED
@@ -1,6 +1,6 @@
 from fastapi import FastAPI,UploadFile
 from PIL import Image
-import cv2
+
 import mediapipe as mp
 import numpy as np
 import pandas as pd
@@ -17,7 +17,7 @@ EP_list = [ 'CPUExecutionProvider']
 Session = ort.InferenceSession(model_path,providers=EP_list)
 input_name = Session.get_inputs()[0].name
 output_name=Session.get_outputs()[0].name
-MediapipeModelPath="
+MediapipeModelPath=r"C:\Users\mf\Desktop\Final AI DIP Pro\mediapipemodels\face_landmarker.task"
 BaseOptions=mp.tasks.BaseOptions
 FaceLandMarker=mp.tasks.vision.FaceLandmarker
 FaceLandMarkerOptions=mp.tasks.vision.FaceLandmarkerOptions
@@ -36,18 +36,7 @@ async def detect(img:UploadFile):
     result=landmarker.detect(mp_img)

     if len(result.face_landmarks)==0:
-        return {"state":False,"message":"No Face Found","distance":0,"name":"null"}
+        return {"state":False,"message":"No Face Found","distance":0,"name":"null","x1":0,"x2":0,"y1":0,"y2":0}
     x1,y1,x2,y2,name,distance=render(Session,input_name,output_name,FacesEmbedding,result,mp_img.numpy_view(),persons)
     return {"state":True,"message":"null","distance":distance,"name":name,"x1":x1,"x2":x2,"y1":y1,"y2":y2}

-# def getHighScore(Scores):
-
-# if type(Scores)==dict:
-# return False
-# scorep=0.00
-# label="0-10"
-# for Score_ in Scores:
-# if Score_["score"] > scorep:
-# scorep=float(Score_["score"])
-# label=Score_["label"]
-# return label
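Because the no-face response now carries the same keys as the success response, a client can parse both branches identically. A hypothetical call against a locally running uvicorn instance might look like the sketch below; the "/detect" route path and the port are assumptions, since the route decorator on detect() lies outside this hunk.

import requests

# Hypothetical client for the detect endpoint; only the response keys and the
# "img" form field come from main.py, the URL is an assumption.
with open("face.jpg", "rb") as fh:
    resp = requests.post(
        "http://127.0.0.1:8000/detect",
        files={"img": ("face.jpg", fh, "image/jpeg")},
    )

data = resp.json()
if data["state"]:
    print(f"{data['name']} at distance {data['distance']:.2f}, "
          f"box ({data['x1']},{data['y1']})-({data['x2']},{data['y2']})")
else:
    # Since this commit the failure payload also includes x1/x2/y1/y2 (all 0),
    # so the same schema handling works for both outcomes.
    print(data["message"])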
requirements.txt
CHANGED
@@ -1,8 +1,6 @@
 fastapi
 mediapipe
 numpy
-opencv_contrib_python
-opencv_python
 pandas
 uvicorn
 Requests