from fastapi import FastAPI, UploadFile
from PIL import Image

import mediapipe as mp
import numpy as np
import pandas as pd
from io import BytesIO
import onnxruntime as ort

from Logs.detectnameanddistance import render

# Mean embedding for each known person; column names are the person identities.
FacesEmbedding = pd.read_csv("./Models/FacesMeanEmbeddings.csv", index_col=0)
persons = list(FacesEmbedding.columns)

# ONNX face-embedding model, executed on the CPU.
model_path = "./Models/FaceModelV5.onnx"
EP_list = ["CPUExecutionProvider"]
Session = ort.InferenceSession(model_path, providers=EP_list)
input_name = Session.get_inputs()[0].name
output_name = Session.get_outputs()[0].name

# MediaPipe face landmarker, configured for single-image (IMAGE) mode.
MediapipeModelPath = r"C:\Users\mf\Desktop\Final AI DIP Pro\mediapipemodels\face_landmarker.task"
BaseOptions = mp.tasks.BaseOptions
FaceLandMarker = mp.tasks.vision.FaceLandmarker
FaceLandMarkerOptions = mp.tasks.vision.FaceLandmarkerOptions
VisionRunningMode = mp.tasks.vision.RunningMode
FaceLandMarkerResult = mp.tasks.vision.FaceLandmarkerResult
options = FaceLandMarkerOptions(
    base_options=BaseOptions(model_asset_path=MediapipeModelPath),
    running_mode=VisionRunningMode.IMAGE,
)
landmarker = FaceLandMarker.create_from_options(options)

App = FastAPI()


@App.post("/upload")
async def detect(img: UploadFile):
    # Read the upload asynchronously and force a 3-channel RGB array,
    # which is what mp.Image expects for the SRGB image format.
    contents = await img.read()
    image = np.array(Image.open(BytesIO(contents)).convert("RGB"))
    mp_img = mp.Image(image_format=mp.ImageFormat.SRGB, data=image)

    # Run the MediaPipe landmarker; bail out early if no face is found.
    result = landmarker.detect(mp_img)
    if len(result.face_landmarks) == 0:
        return {"state": False, "message": "No Face Found", "distance": 0,
                "name": "null", "x1": 0, "x2": 0, "y1": 0, "y2": 0}

    # render() matches the detected face against the known embeddings and
    # returns the bounding box, the best-matching name, and the distance.
    x1, y1, x2, y2, name, distance = render(
        Session, input_name, output_name, FacesEmbedding, result,
        mp_img.numpy_view(), persons,
    )
    return {"state": True, "message": "null", "distance": distance, "name": name,
            "x1": x1, "x2": x2, "y1": y1, "y2": y2}
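

# --- Usage sketch (not part of the original file; assumptions noted) ---
# A minimal illustration of how a client might call the /upload endpoint,
# assuming the service is running locally (e.g. `uvicorn main:App --port 8000`,
# where the module name "main" is a guess) and "face.jpg" is a hypothetical
# test image. Kept commented out so importing this module has no side effects.
#
# import requests
#
# with open("face.jpg", "rb") as f:
#     resp = requests.post(
#         "http://127.0.0.1:8000/upload",
#         # the form field name must match the endpoint's `img` parameter
#         files={"img": ("face.jpg", f, "image/jpeg")},
#     )
# print(resp.json())
# # -> {"state": ..., "message": ..., "distance": ..., "name": ...,
# #     "x1": ..., "x2": ..., "y1": ..., "y2": ...}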