from io import BytesIO

from fastapi import FastAPI, UploadFile
from PIL import Image
import mediapipe as mp
import numpy as np
import pandas as pd
import onnxruntime as ort

from Logs.detectnameanddistance import render

# Reference embeddings: one mean embedding column per known person.
FacesEmbedding = pd.read_csv("./Models/FacesMeanEmbeddings.csv", index_col=0)
persons = list(FacesEmbedding.columns)

# Face-embedding model served through ONNX Runtime on the CPU.
model_path = "./Models/FaceModelV5.onnx"
EP_list = ["CPUExecutionProvider"]
Session = ort.InferenceSession(model_path, providers=EP_list)
input_name = Session.get_inputs()[0].name
output_name = Session.get_outputs()[0].name

# MediaPipe Face Landmarker, configured for single-image mode.
MediapipeModelPath = "./Models/face_landmarker.task"
BaseOptions = mp.tasks.BaseOptions
FaceLandMarker = mp.tasks.vision.FaceLandmarker
FaceLandMarkerOptions = mp.tasks.vision.FaceLandmarkerOptions
VisionRunningMode = mp.tasks.vision.RunningMode
FaceLandMarkerResult = mp.tasks.vision.FaceLandmarkerResult
options = FaceLandMarkerOptions(
    base_options=BaseOptions(model_asset_path=MediapipeModelPath),
    running_mode=VisionRunningMode.IMAGE,
)
landmarker = FaceLandMarker.create_from_options(options)

App = FastAPI()


@App.post("/upload")
async def detect(img: UploadFile):
    # Read the uploaded file and force a 3-channel RGB array so it matches
    # the SRGB layout MediaPipe expects.
    data = await img.read()
    image = np.array(Image.open(BytesIO(data)).convert("RGB"))
    mp_img = mp.Image(image_format=mp.ImageFormat.SRGB, data=image)

    # Locate face landmarks; bail out early if no face is detected.
    result = landmarker.detect(mp_img)
    if len(result.face_landmarks) == 0:
        return {"state": False, "message": "No Face Found", "distance": 0,
                "name": "null", "x1": 0, "x2": 0, "y1": 0, "y2": 0}

    # Identify the detected face: render() embeds it with the ONNX model,
    # compares it against the stored mean embeddings, and returns the
    # bounding box, the closest known person, and the embedding distance.
    x1, y1, x2, y2, name, distance = render(
        Session, input_name, output_name, FacesEmbedding,
        result, mp_img.numpy_view(), persons,
    )
    return {"state": True, "message": "null", "distance": distance,
            "name": name, "x1": x1, "x2": x2, "y1": y1, "y2": y2}
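
# Example client call (a minimal sketch, not part of the service). It assumes
# the app is served locally, e.g. `uvicorn main:App --port 8000` -- the module
# name "main" and the port are assumptions, and `requests` is an extra
# dependency. The multipart field name must be "img" to match the UploadFile
# parameter above.
#
#   import requests
#
#   with open("face.jpg", "rb") as f:  # any test image with a visible face
#       resp = requests.post(
#           "http://127.0.0.1:8000/upload",
#           files={"img": ("face.jpg", f, "image/jpeg")},
#       )
#   print(resp.json())  # e.g. {"state": true, "name": ..., "distance": ..., "x1": ...}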