Spaces:
Sleeping
Sleeping
Upload 3 files
Browse files- Logs/detectnameanddistance.py +51 -0
- main.py +53 -0
- requirements.txt +10 -0
Logs/detectnameanddistance.py
ADDED
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
|
2 |
+
import numpy as np
|
3 |
+
def render(Session, input_name, output_name, FacesEmbedding, results, FaceImage, persons):
    """Identify the first usable detected face and estimate its camera distance.

    Parameters
    ----------
    Session : onnxruntime.InferenceSession
        Loaded face-embedding model.
    input_name, output_name : str
        Names of the model's input and output tensors.
    FacesEmbedding : pandas.DataFrame
        One column of reference embedding values per known person.
    results : mediapipe FaceLandmarkerResult
        Landmark lists for every face detected in ``FaceImage``.
    FaceImage : numpy.ndarray
        Image array of shape (H, W, 3).
    persons : list[str]
        Column names of ``FacesEmbedding`` (the known identities).

    Returns
    -------
    tuple | None
        ``(x1, y1, x2, y2, name, distance)`` for the first face that yields a
        usable crop; the coordinates are the two eye-landmark pixels used for
        the distance estimate, ``name`` is ``"UnKnow"`` when no reference
        embedding is within threshold.  ``None`` when no face produced a
        usable crop.
    """
    for res in results.face_landmarks:
        # Pixel positions of the two eye landmarks (indices 145 / 374);
        # their pixel separation is the known-width reference for ranging.
        x_ = int(res[145].x * FaceImage.shape[1])
        y_ = int(res[145].y * FaceImage.shape[0])
        x2_ = int(res[374].x * FaceImage.shape[1])
        y2_ = int(res[374].y * FaceImage.shape[0])
        w = np.sqrt((x_ - x2_) ** 2 + (y_ - y2_) ** 2)

        # Pinhole-camera range estimate: W = assumed real eye separation (cm),
        # f = focal length in pixels (camera calibration constant — TODO confirm).
        W = 6.3
        f = 840
        d = (W * f) / w

        # Face bounding box from extreme landmarks, padded inward/outward by
        # 10 px while staying inside the image.
        x = int(res[356].x * FaceImage.shape[1])
        y = int(res[152].y * FaceImage.shape[0])
        x2 = int(res[162].x * FaceImage.shape[1])
        y2 = int(res[338].y * FaceImage.shape[0])
        if x < FaceImage.shape[1] - 10:
            x += 10
        # BUG FIX: original condition was `y > shape[0]-10`, which only grew y
        # when it was already at the bottom edge (pushing it out of bounds);
        # pad downward only when there is room, mirroring the x handling.
        if y < FaceImage.shape[0] - 10:
            y += 10
        if x2 > 10:
            x2 -= 10
        if y2 > 10:
            y2 -= 10

        modelimg = FaceImage[y2:y, x2:x]
        if modelimg.size < 9:
            # Degenerate crop — try the next detected face.
            continue
        modelimg = cv2.resize(modelimg, (224, 224)).astype(np.float32)
        modelimg = modelimg / 255

        if d > 0:
            # The embedding depends only on the crop, so run the model once.
            # (The original re-ran the ONNX session for every known person.)
            output = np.squeeze(
                Session.run(
                    [output_name],
                    {input_name: np.expand_dims(modelimg, axis=0).astype(np.float16)},
                )[0]
            )
            distances = []
            for name in persons:
                person_embedding = FacesEmbedding[name].values
                # Squared Euclidean distance to this person's mean embedding.
                distances.append(np.sum(np.power(output - person_embedding, 2)))
            best = int(np.argmin(distances))
            name = persons[best]
            distance = distances[best]
            if distance < 0.3:
                # BUG FIX: original returned the crop coordinate `y2` here
                # instead of the eye landmark `y2_` — inconsistent with the
                # other three returned coordinates.
                return x_, y_, x2_, y2_, name, d
            else:
                return x_, y_, x2_, y2_, "UnKnow", d
    # No face yielded a usable crop (original fell off the end implicitly).
    return None
|
50 |
+
|
51 |
+
|
main.py
ADDED
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from fastapi import FastAPI,UploadFile
|
2 |
+
from PIL import Image
|
3 |
+
import cv2
|
4 |
+
import mediapipe as mp
|
5 |
+
import numpy as np
|
6 |
+
import pandas as pd
|
7 |
+
from io import BytesIO
|
8 |
+
import onnxruntime as ort
|
9 |
+
from Logs.detectnameanddistance import render
|
10 |
+
|
11 |
+
|
12 |
+
|
13 |
+
# Reference embeddings: one CSV column per known person; each column holds
# that person's mean embedding vector (rows are vector components).
FacesEmbedding=pd.read_csv("./Models/FacesMeanEmbeddings.csv",index_col=0)
# Known identities are the DataFrame's column names.
persons=list(FacesEmbedding.columns)
# ONNX face-embedding model, forced onto the CPU execution provider.
model_path="./Models/FaceModelV5.onnx"
EP_list = [ 'CPUExecutionProvider']
Session = ort.InferenceSession(model_path,providers=EP_list)
# Tensor names needed to feed/read the session in render().
input_name = Session.get_inputs()[0].name
output_name=Session.get_outputs()[0].name
# MediaPipe face-landmark detector, configured for single still images
# (IMAGE running mode), loaded from the bundled .task asset.
MediapipeModelPath="./Models/face_landmarker.task"
BaseOptions=mp.tasks.BaseOptions
FaceLandMarker=mp.tasks.vision.FaceLandmarker
FaceLandMarkerOptions=mp.tasks.vision.FaceLandmarkerOptions
VisionRunningMode=mp.tasks.vision.RunningMode
# NOTE(review): FaceLandMarkerResult is bound but never used below —
# presumably kept for type reference; confirm before removing.
FaceLandMarkerResult=mp.tasks.vision.FaceLandmarkerResult
options=FaceLandMarkerOptions(base_options=BaseOptions(model_asset_path=MediapipeModelPath),running_mode=VisionRunningMode.IMAGE)
landmarker= FaceLandMarker.create_from_options(options)

# FastAPI application serving the /upload recognition endpoint.
App=FastAPI()
|
30 |
+
|
31 |
+
@App.post("/upload")
async def detect(img: UploadFile):
    """Locate a face in the uploaded image, identify it, and estimate distance.

    Returns a JSON payload: ``state``/``message`` always; on success also the
    recognized ``name``, estimated ``distance`` and the landmark coordinates
    ``x1``/``y1``/``x2``/``y2``.
    """
    # BUG FIX: read the upload without blocking the event loop — the original
    # used the synchronous img.file.read() inside an async handler.
    data = await img.read()
    image = np.array(Image.open(BytesIO(data)))
    mp_img = mp.Image(image_format=mp.ImageFormat.SRGB, data=image)
    result = landmarker.detect(mp_img)

    if len(result.face_landmarks) == 0:
        return {"state": False, "message": "No Face Found", "distance": 0, "name": "null"}

    rendered = render(Session, input_name, output_name, FacesEmbedding, result, mp_img.numpy_view(), persons)
    # BUG FIX: render() can return None (e.g. every detected face produced a
    # degenerate crop); the original unconditionally unpacked the result and
    # crashed with a TypeError.
    if rendered is None:
        return {"state": False, "message": "No Face Found", "distance": 0, "name": "null"}
    x1, y1, x2, y2, name, distance = rendered
    return {"state": True, "message": "null", "distance": distance, "name": name, "x1": x1, "x2": x2, "y1": y1, "y2": y2}
|
42 |
+
|
43 |
+
# def getHighScore(Scores):
|
44 |
+
|
45 |
+
# if type(Scores)==dict:
|
46 |
+
# return False
|
47 |
+
# scorep=0.00
|
48 |
+
# label="0-10"
|
49 |
+
# for Score_ in Scores:
|
50 |
+
# if Score_["score"] > scorep:
|
51 |
+
# scorep=float(Score_["score"])
|
52 |
+
# label=Score_["label"]
|
53 |
+
# return label
|
requirements.txt
ADDED
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
fastapi
|
2 |
+
mediapipe
|
3 |
+
numpy
|
4 |
+
opencv_contrib_python
|
5 |
+
opencv_python
|
6 |
+
pandas
|
7 |
+
uvicorn
|
8 |
+
Requests
|
9 |
+
onnxruntime
|
10 |
+
python-multipart
|