leonelhs committed on
Commit 5cbc6a9 · 1 Parent(s): 9f66e14

add models

models/__pycache__/arcface_onnx.cpython-312.pyc ADDED
Binary file (4.74 kB)

models/__pycache__/attribute.cpython-312.pyc ADDED
Binary file (4.54 kB)

models/__pycache__/inswapper.cpython-312.pyc ADDED
Binary file (7.62 kB)

models/__pycache__/landmark.cpython-312.pyc ADDED
Binary file (6.04 kB)

models/__pycache__/retinaface.cpython-312.pyc ADDED
Binary file (13.3 kB)
models/arcface_onnx.py ADDED
@@ -0,0 +1,91 @@
# -*- coding: utf-8 -*-
# @Organization : insightface.ai
# @Author : Jia Guo
# @Time : 2021-05-04
# @Function :

from __future__ import division
import numpy as np
import cv2
import onnx
import onnxruntime
from utils import face_align

__all__ = [
    'ArcFaceONNX',
]


class ArcFaceONNX:
    def __init__(self, model_file=None, session=None, ctx_id=0, **kwargs):
        assert model_file is not None
        self.model_file = model_file
        self.session = session
        self.taskname = 'recognition'
        find_sub = False
        find_mul = False
        model = onnx.load(self.model_file)
        graph = model.graph
        for nid, node in enumerate(graph.node[:8]):
            if node.name.startswith('Sub') or node.name.startswith('_minus'):
                find_sub = True
            if node.name.startswith('Mul') or node.name.startswith('_mul'):
                find_mul = True
        if find_sub and find_mul:
            # MXNet ArcFace model: normalization is baked into the graph.
            input_mean = 0.0
            input_std = 1.0
        else:
            input_mean = 127.5
            input_std = 127.5
        self.input_mean = input_mean
        self.input_std = input_std
        if self.session is None:
            self.session = onnxruntime.InferenceSession(self.model_file, None)
        input_cfg = self.session.get_inputs()[0]
        input_shape = input_cfg.shape
        input_name = input_cfg.name
        self.input_size = tuple(input_shape[2:4][::-1])
        self.input_shape = input_shape
        outputs = self.session.get_outputs()
        output_names = []
        for out in outputs:
            output_names.append(out.name)
        self.input_name = input_name
        self.output_names = output_names
        assert len(self.output_names) == 1
        self.output_shape = outputs[0].shape

        if ctx_id < 0:
            self.session.set_providers(['CPUExecutionProvider'])

    def get(self, img, face):
        aimg = face_align.norm_crop(img, landmark=face.kps, image_size=self.input_size[0])
        face.embedding = self.get_feat(aimg).flatten()
        return face.embedding

    def compute_sim(self, feat1, feat2):
        from numpy.linalg import norm
        feat1 = feat1.ravel()
        feat2 = feat2.ravel()
        sim = np.dot(feat1, feat2) / (norm(feat1) * norm(feat2))
        return sim

    def get_feat(self, imgs):
        if not isinstance(imgs, list):
            imgs = [imgs]
        input_size = self.input_size

        blob = cv2.dnn.blobFromImages(imgs, 1.0 / self.input_std, input_size,
                                      (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
        net_out = self.session.run(self.output_names, {self.input_name: blob})[0]
        return net_out

    def forward(self, batch_data):
        blob = (batch_data - self.input_mean) / self.input_std
        net_out = self.session.run(self.output_names, {self.input_name: blob})[0]
        return net_out
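As a sanity check of the similarity metric, here is a minimal, self-contained sketch of what compute_sim computes (the 512-D width is only the typical ArcFace embedding size, not something this file enforces):

import numpy as np

def cosine_sim(feat1, feat2):
    # Same math as ArcFaceONNX.compute_sim: cosine of the angle
    # between two flattened embedding vectors.
    feat1, feat2 = feat1.ravel(), feat2.ravel()
    return float(np.dot(feat1, feat2) / (np.linalg.norm(feat1) * np.linalg.norm(feat2)))

a = np.random.randn(512).astype(np.float32)  # assumed embedding width
print(cosine_sim(a, a))    # ~1.0: identical embeddings
print(cosine_sim(a, -a))   # ~-1.0: opposite embeddings
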
models/attribute.py ADDED
@@ -0,0 +1,93 @@
# -*- coding: utf-8 -*-
# @Organization : insightface.ai
# @Author : Jia Guo
# @Time : 2021-06-19
# @Function :

from __future__ import division
import numpy as np
import cv2
import onnx
import onnxruntime
from utils import face_align

__all__ = [
    'Attribute',
]


class Attribute:
    def __init__(self, model_file=None, session=None, ctx_id=0, **kwargs):
        assert model_file is not None
        self.model_file = model_file
        self.session = session
        find_sub = False
        find_mul = False
        model = onnx.load(self.model_file)
        graph = model.graph
        for nid, node in enumerate(graph.node[:8]):
            if node.name.startswith('Sub') or node.name.startswith('_minus'):
                find_sub = True
            if node.name.startswith('Mul') or node.name.startswith('_mul'):
                find_mul = True
            if nid < 3 and node.name == 'bn_data':
                find_sub = True
                find_mul = True
        if find_sub and find_mul:
            # MXNet model: normalization is baked into the graph.
            input_mean = 0.0
            input_std = 1.0
        else:
            input_mean = 127.5
            input_std = 128.0
        self.input_mean = input_mean
        self.input_std = input_std
        if self.session is None:
            self.session = onnxruntime.InferenceSession(self.model_file, None)
        input_cfg = self.session.get_inputs()[0]
        input_shape = input_cfg.shape
        input_name = input_cfg.name
        self.input_size = tuple(input_shape[2:4][::-1])
        self.input_shape = input_shape
        outputs = self.session.get_outputs()
        output_names = []
        for out in outputs:
            output_names.append(out.name)
        self.input_name = input_name
        self.output_names = output_names
        assert len(self.output_names) == 1
        output_shape = outputs[0].shape
        if output_shape[1] == 3:
            self.taskname = 'genderage'
        else:
            self.taskname = 'attribute_%d' % output_shape[1]

        if ctx_id < 0:
            self.session.set_providers(['CPUExecutionProvider'])

    def get(self, img, face):
        bbox = face.bbox
        w, h = (bbox[2] - bbox[0]), (bbox[3] - bbox[1])
        center = (bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2
        rotate = 0
        _scale = self.input_size[0] / (max(w, h) * 1.5)
        aimg, M = face_align.transform(img, center, self.input_size[0], _scale, rotate)
        input_size = tuple(aimg.shape[0:2][::-1])
        blob = cv2.dnn.blobFromImage(aimg, 1.0 / self.input_std, input_size,
                                     (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
        pred = self.session.run(self.output_names, {self.input_name: blob})[0][0]
        if self.taskname == 'genderage':
            assert len(pred) == 3
            gender = np.argmax(pred[:2])
            age = int(np.round(pred[2] * 100))
            face['gender'] = gender
            face['age'] = age
            return gender, age
        else:
            return pred
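The genderage head packs everything into three values. A self-contained sketch of the decoding done at the end of Attribute.get (the 0 = female / 1 = male reading follows insightface's usual convention, not anything enforced here):

import numpy as np

def decode_genderage(pred):
    # Mirrors the 'genderage' branch: first two values are gender
    # scores, the third is age scaled by 1/100.
    assert len(pred) == 3
    gender = int(np.argmax(pred[:2]))  # assumed: 0 = female, 1 = male
    age = int(np.round(pred[2] * 100))
    return gender, age

print(decode_genderage(np.array([0.1, 0.9, 0.31])))  # (1, 31)
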
models/inswapper.py ADDED
@@ -0,0 +1,104 @@
# https://github.com/deepinsight/insightface/blob/master/python-package/insightface/model_zoo/inswapper.py

import numpy as np
import onnxruntime
import cv2
import onnx
from onnx import numpy_helper
from utils import face_align


class INSwapper:
    def __init__(self, model_file=None, session=None):
        self.model_file = model_file
        self.session = session
        model = onnx.load(self.model_file)
        graph = model.graph
        # The last initializer in the graph holds the embedding projection matrix.
        self.emap = numpy_helper.to_array(graph.initializer[-1])
        self.input_mean = 0.0
        self.input_std = 255.0
        if self.session is None:
            self.session = onnxruntime.InferenceSession(self.model_file, None)
        inputs = self.session.get_inputs()
        self.input_names = []
        for inp in inputs:
            self.input_names.append(inp.name)
        outputs = self.session.get_outputs()
        output_names = []
        for out in outputs:
            output_names.append(out.name)
        self.output_names = output_names
        assert len(self.output_names) == 1
        output_shape = outputs[0].shape
        input_cfg = inputs[0]
        input_shape = input_cfg.shape
        self.input_shape = input_shape
        self.input_size = tuple(input_shape[2:4][::-1])

    def forward(self, img, latent):
        img = (img - self.input_mean) / self.input_std
        pred = self.session.run(self.output_names, {self.input_names[0]: img, self.input_names[1]: latent})[0]
        return pred

    def get(self, img, target_face, source_face, paste_back=True):
        aimg, M = face_align.norm_crop2(img, target_face.kps, self.input_size[0])
        blob = cv2.dnn.blobFromImage(aimg, 1.0 / self.input_std, self.input_size,
                                     (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
        latent = source_face.normed_embedding.reshape((1, -1))
        latent = np.dot(latent, self.emap)
        latent /= np.linalg.norm(latent)
        pred = self.session.run(self.output_names, {self.input_names[0]: blob, self.input_names[1]: latent})[0]
        img_fake = pred.transpose((0, 2, 3, 1))[0]
        bgr_fake = np.clip(255 * img_fake, 0, 255).astype(np.uint8)[:, :, ::-1]
        if not paste_back:
            return bgr_fake, M
        else:
            target_img = img
            fake_diff = bgr_fake.astype(np.float32) - aimg.astype(np.float32)
            fake_diff = np.abs(fake_diff).mean(axis=2)
            # Zero a 2-pixel border so crop-edge artifacts do not leak
            # into the difference mask.
            fake_diff[:2, :] = 0
            fake_diff[-2:, :] = 0
            fake_diff[:, :2] = 0
            fake_diff[:, -2:] = 0
            IM = cv2.invertAffineTransform(M)
            img_white = np.full((aimg.shape[0], aimg.shape[1]), 255, dtype=np.float32)
            bgr_fake = cv2.warpAffine(bgr_fake, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0)
            img_white = cv2.warpAffine(img_white, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0)
            fake_diff = cv2.warpAffine(fake_diff, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0)
            img_white[img_white > 20] = 255
            fthresh = 10
            fake_diff[fake_diff < fthresh] = 0
            fake_diff[fake_diff >= fthresh] = 255
            img_mask = img_white
            mask_h_inds, mask_w_inds = np.where(img_mask == 255)
            mask_h = np.max(mask_h_inds) - np.min(mask_h_inds)
            mask_w = np.max(mask_w_inds) - np.min(mask_w_inds)
            mask_size = int(np.sqrt(mask_h * mask_w))
            # Erode the paste mask, then feather its edge with a Gaussian blur
            # so the swapped face blends into the target image.
            k = max(mask_size // 10, 10)
            kernel = np.ones((k, k), np.uint8)
            img_mask = cv2.erode(img_mask, kernel, iterations=1)
            kernel = np.ones((2, 2), np.uint8)
            fake_diff = cv2.dilate(fake_diff, kernel, iterations=1)
            k = max(mask_size // 20, 5)
            kernel_size = (k, k)
            blur_size = tuple(2 * i + 1 for i in kernel_size)
            img_mask = cv2.GaussianBlur(img_mask, blur_size, 0)
            k = 5
            kernel_size = (k, k)
            blur_size = tuple(2 * i + 1 for i in kernel_size)
            fake_diff = cv2.GaussianBlur(fake_diff, blur_size, 0)
            img_mask /= 255
            fake_diff /= 255
            img_mask = np.reshape(img_mask, [img_mask.shape[0], img_mask.shape[1], 1])
            fake_merged = img_mask * bgr_fake + (1 - img_mask) * target_img.astype(np.float32)
            fake_merged = fake_merged.astype(np.uint8)
            return fake_merged
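The identity-latent preparation at the top of INSwapper.get is easy to test in isolation. A minimal sketch (the random emap is a stand-in for the real graph initializer; 512 is the ArcFace embedding width the swapper expects):

import numpy as np

def prepare_latent(normed_embedding, emap):
    # Mirrors INSwapper.get: project the source face's unit-norm ArcFace
    # embedding through the model's emap matrix and re-normalize.
    latent = normed_embedding.reshape((1, -1))
    latent = np.dot(latent, emap)
    latent /= np.linalg.norm(latent)
    return latent.astype(np.float32)

emb = np.random.randn(512).astype(np.float32)
emb /= np.linalg.norm(emb)
emap = np.random.randn(512, 512).astype(np.float32)  # stand-in for graph.initializer[-1]
print(prepare_latent(emb, emap).shape)  # (1, 512)
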
models/landmark.py ADDED
@@ -0,0 +1,117 @@
# -*- coding: utf-8 -*-
# @Organization : insightface.ai
# @Author : Jia Guo
# @Time : 2021-05-04
# @Function :

from __future__ import division

import pickle

import cv2
import numpy as np
import onnx
import onnxruntime

from utils import face_align
from utils import transform

__all__ = [
    'Landmark',
]


class Landmark:
    def __init__(self, model_file=None, session=None, ctx_id=0, **kwargs):
        assert model_file is not None
        self.model_file = model_file
        self.session = session
        find_sub = False
        find_mul = False
        model = onnx.load(self.model_file)
        graph = model.graph
        for nid, node in enumerate(graph.node[:8]):
            if node.name.startswith('Sub') or node.name.startswith('_minus'):
                find_sub = True
            if node.name.startswith('Mul') or node.name.startswith('_mul'):
                find_mul = True
            if nid < 3 and node.name == 'bn_data':
                find_sub = True
                find_mul = True
        if find_sub and find_mul:
            # MXNet model: normalization is baked into the graph.
            input_mean = 0.0
            input_std = 1.0
        else:
            input_mean = 127.5
            input_std = 128.0
        self.input_mean = input_mean
        self.input_std = input_std
        if self.session is None:
            self.session = onnxruntime.InferenceSession(self.model_file, None)
        input_cfg = self.session.get_inputs()[0]
        input_shape = input_cfg.shape
        input_name = input_cfg.name
        self.input_size = tuple(input_shape[2:4][::-1])
        self.input_shape = input_shape
        outputs = self.session.get_outputs()
        output_names = []
        for out in outputs:
            output_names.append(out.name)
        self.input_name = input_name
        self.output_names = output_names
        assert len(self.output_names) == 1
        output_shape = outputs[0].shape
        self.require_pose = False
        if output_shape[1] == 3309:
            # 3D 68-point model: also estimates head pose against a mean shape.
            self.lmk_dim = 3
            self.lmk_num = 68
            with open("meanshape_68.pkl", 'rb') as f:
                self.mean_lmk = pickle.load(f)
            self.require_pose = True
        else:
            self.lmk_dim = 2
            self.lmk_num = output_shape[1] // self.lmk_dim
        self.taskname = 'landmark_%dd_%d' % (self.lmk_dim, self.lmk_num)

        if ctx_id < 0:
            self.session.set_providers(['CPUExecutionProvider'])

    def get(self, img, face):
        bbox = face.bbox
        w, h = (bbox[2] - bbox[0]), (bbox[3] - bbox[1])
        center = (bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2
        rotate = 0
        _scale = self.input_size[0] / (max(w, h) * 1.5)
        aimg, M = face_align.transform(img, center, self.input_size[0], _scale, rotate)
        input_size = tuple(aimg.shape[0:2][::-1])
        blob = cv2.dnn.blobFromImage(aimg, 1.0 / self.input_std, input_size,
                                     (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
        pred = self.session.run(self.output_names, {self.input_name: blob})[0][0]
        if pred.shape[0] >= 3000:
            pred = pred.reshape((-1, 3))
        else:
            pred = pred.reshape((-1, 2))
        if self.lmk_num < pred.shape[0]:
            pred = pred[self.lmk_num * -1:, :]
        # Network outputs lie in [-1, 1] relative to the crop; map to pixels.
        pred[:, 0:2] += 1
        pred[:, 0:2] *= (self.input_size[0] // 2)
        if pred.shape[1] == 3:
            pred[:, 2] *= (self.input_size[0] // 2)

        IM = cv2.invertAffineTransform(M)
        pred = face_align.trans_points(pred, IM)
        face[self.taskname] = pred
        if self.require_pose:
            P = transform.estimate_affine_matrix_3d23d(self.mean_lmk, pred)
            s, R, t = transform.P2sRt(P)
            rx, ry, rz = transform.matrix2angle(R)
            pose = np.array([rx, ry, rz], dtype=np.float32)
            face['pose'] = pose  # pitch, yaw, roll
        return pred
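The coordinate decoding in the middle of Landmark.get can be checked standalone. A minimal sketch (212 outputs and a 192-pixel crop correspond to a typical 106-point 2D model; both numbers are assumptions, not read from the file):

import numpy as np

def decode_landmarks(pred, input_size):
    # Mirrors Landmark.get: outputs are in [-1, 1] relative to the
    # aligned crop; shift and scale them into crop-pixel coordinates.
    pred = pred.reshape((-1, 3)) if pred.shape[0] >= 3000 else pred.reshape((-1, 2))
    pred[:, 0:2] += 1
    pred[:, 0:2] *= (input_size // 2)
    if pred.shape[1] == 3:
        pred[:, 2] *= (input_size // 2)
    return pred

raw = np.zeros(212, dtype=np.float32)    # 106 2-D landmarks, all at crop center
print(decode_landmarks(raw, 192)[:2])    # [[96. 96.] [96. 96.]]
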
models/retinaface.py ADDED
@@ -0,0 +1,288 @@
# -*- coding: utf-8 -*-
# @Organization : insightface.ai
# @Author : Jia Guo
# @Time : 2021-09-18
# @Function :

from __future__ import division

import os.path as osp

import cv2
import numpy as np
import onnxruntime


def softmax(z):
    assert len(z.shape) == 2
    s = np.max(z, axis=1)
    s = s[:, np.newaxis]  # necessary step to do broadcasting
    e_x = np.exp(z - s)
    div = np.sum(e_x, axis=1)
    div = div[:, np.newaxis]  # ditto
    return e_x / div


def distance2bbox(points, distance, max_shape=None):
    """Decode distance prediction to bounding box.

    Args:
        points (ndarray): Shape (n, 2), [x, y].
        distance (ndarray): Distance from the given point to 4
            boundaries (left, top, right, bottom).
        max_shape (tuple): Shape of the image.

    Returns:
        ndarray: Decoded bboxes.
    """
    x1 = points[:, 0] - distance[:, 0]
    y1 = points[:, 1] - distance[:, 1]
    x2 = points[:, 0] + distance[:, 2]
    y2 = points[:, 1] + distance[:, 3]
    if max_shape is not None:
        # np.clip, not Tensor.clamp: these are numpy arrays.
        x1 = np.clip(x1, 0, max_shape[1])
        y1 = np.clip(y1, 0, max_shape[0])
        x2 = np.clip(x2, 0, max_shape[1])
        y2 = np.clip(y2, 0, max_shape[0])
    return np.stack([x1, y1, x2, y2], axis=-1)


def distance2kps(points, distance, max_shape=None):
    """Decode distance prediction to keypoints.

    Args:
        points (ndarray): Shape (n, 2), [x, y].
        distance (ndarray): Offsets from the given point to each
            keypoint, as (dx, dy) pairs.
        max_shape (tuple): Shape of the image.

    Returns:
        ndarray: Decoded keypoints.
    """
    preds = []
    for i in range(0, distance.shape[1], 2):
        px = points[:, i % 2] + distance[:, i]
        py = points[:, i % 2 + 1] + distance[:, i + 1]
        if max_shape is not None:
            px = np.clip(px, 0, max_shape[1])
            py = np.clip(py, 0, max_shape[0])
        preds.append(px)
        preds.append(py)
    return np.stack(preds, axis=-1)


class RetinaFace:
    def __init__(self, model_file=None, session=None, ctx_id=0, **kwargs):
        self.input_size = None
        self.model_file = model_file
        self.session = session
        self.taskname = 'detection'
        if self.session is None:
            assert self.model_file is not None
            assert osp.exists(self.model_file)
            self.session = onnxruntime.InferenceSession(self.model_file, None)
        self.center_cache = {}
        self.nms_thresh = 0.4
        self.det_thresh = 0.5
        self._init_vars()

        if ctx_id < 0:
            self.session.set_providers(['CPUExecutionProvider'])
        nms_thresh = kwargs.get('nms_thresh', None)
        if nms_thresh is not None:
            self.nms_thresh = nms_thresh
        det_thresh = kwargs.get('det_thresh', None)
        if det_thresh is not None:
            self.det_thresh = det_thresh
        input_size = kwargs.get('input_size', None)
        if input_size is not None:
            if self.input_size is not None:
                print('warning: det_size is already set in detection model, ignore')
            else:
                self.input_size = input_size

    def _init_vars(self):
        input_cfg = self.session.get_inputs()[0]
        input_shape = input_cfg.shape
        if isinstance(input_shape[2], str):
            # Dynamic input shape: the detection size must be supplied at detect() time.
            self.input_size = None
        else:
            self.input_size = tuple(input_shape[2:4][::-1])
        input_name = input_cfg.name
        self.input_shape = input_shape
        outputs = self.session.get_outputs()
        output_names = []
        for o in outputs:
            output_names.append(o.name)
        self.input_name = input_name
        self.output_names = output_names
        self.input_mean = 127.5
        self.input_std = 128.0
        self.use_kps = False
        self._anchor_ratio = 1.0
        self._num_anchors = 1
        # The number of outputs determines the FPN configuration and whether
        # the model predicts keypoints.
        if len(outputs) == 6:
            self.fmc = 3
            self._feat_stride_fpn = [8, 16, 32]
            self._num_anchors = 2
        elif len(outputs) == 9:
            self.fmc = 3
            self._feat_stride_fpn = [8, 16, 32]
            self._num_anchors = 2
            self.use_kps = True
        elif len(outputs) == 10:
            self.fmc = 5
            self._feat_stride_fpn = [8, 16, 32, 64, 128]
            self._num_anchors = 1
        elif len(outputs) == 15:
            self.fmc = 5
            self._feat_stride_fpn = [8, 16, 32, 64, 128]
            self._num_anchors = 1
            self.use_kps = True

    def forward(self, img, threshold):
        scores_list = []
        bboxes_list = []
        kpss_list = []
        input_size = tuple(img.shape[0:2][::-1])
        blob = cv2.dnn.blobFromImage(img, 1.0 / self.input_std, input_size,
                                     (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
        net_outs = self.session.run(self.output_names, {self.input_name: blob})

        input_height = blob.shape[2]
        input_width = blob.shape[3]
        fmc = self.fmc
        for idx, stride in enumerate(self._feat_stride_fpn):
            scores = net_outs[idx]
            bbox_preds = net_outs[idx + fmc]
            bbox_preds = bbox_preds * stride
            if self.use_kps:
                kps_preds = net_outs[idx + fmc * 2] * stride
            height = input_height // stride
            width = input_width // stride
            key = (height, width, stride)
            if key in self.center_cache:
                anchor_centers = self.center_cache[key]
            else:
                # Build the grid of anchor centers for this stride.
                anchor_centers = np.stack(np.mgrid[:height, :width][::-1], axis=-1).astype(np.float32)
                anchor_centers = (anchor_centers * stride).reshape((-1, 2))
                if self._num_anchors > 1:
                    anchor_centers = np.stack([anchor_centers] * self._num_anchors, axis=1).reshape((-1, 2))
                if len(self.center_cache) < 100:
                    self.center_cache[key] = anchor_centers

            pos_inds = np.where(scores >= threshold)[0]
            bboxes = distance2bbox(anchor_centers, bbox_preds)
            pos_scores = scores[pos_inds]
            pos_bboxes = bboxes[pos_inds]
            scores_list.append(pos_scores)
            bboxes_list.append(pos_bboxes)
            if self.use_kps:
                kpss = distance2kps(anchor_centers, kps_preds)
                kpss = kpss.reshape((kpss.shape[0], -1, 2))
                pos_kpss = kpss[pos_inds]
                kpss_list.append(pos_kpss)
        return scores_list, bboxes_list, kpss_list

    def detect(self, img, input_size=None, max_num=0, metric='default'):
        assert input_size is not None or self.input_size is not None
        input_size = self.input_size if input_size is None else input_size

        # Letterbox the image into the detection size, preserving aspect ratio.
        im_ratio = float(img.shape[0]) / img.shape[1]
        model_ratio = float(input_size[1]) / input_size[0]
        if im_ratio > model_ratio:
            new_height = input_size[1]
            new_width = int(new_height / im_ratio)
        else:
            new_width = input_size[0]
            new_height = int(new_width * im_ratio)
        det_scale = float(new_height) / img.shape[0]
        resized_img = cv2.resize(img, (new_width, new_height))
        det_img = np.zeros((input_size[1], input_size[0], 3), dtype=np.uint8)
        det_img[:new_height, :new_width, :] = resized_img

        scores_list, bboxes_list, kpss_list = self.forward(det_img, self.det_thresh)

        scores = np.vstack(scores_list)
        scores_ravel = scores.ravel()
        order = scores_ravel.argsort()[::-1]
        bboxes = np.vstack(bboxes_list) / det_scale
        if self.use_kps:
            kpss = np.vstack(kpss_list) / det_scale
        pre_det = np.hstack((bboxes, scores)).astype(np.float32, copy=False)
        pre_det = pre_det[order, :]
        keep = self.nms(pre_det)
        det = pre_det[keep, :]
        if self.use_kps:
            kpss = kpss[order, :, :]
            kpss = kpss[keep, :, :]
        else:
            kpss = None
        if max_num > 0 and det.shape[0] > max_num:
            area = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
            img_center = img.shape[0] // 2, img.shape[1] // 2
            offsets = np.vstack([
                (det[:, 0] + det[:, 2]) / 2 - img_center[1],
                (det[:, 1] + det[:, 3]) / 2 - img_center[0]
            ])
            offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
            if metric == 'max':
                values = area
            else:
                # Put some extra weight on well-centered faces.
                values = area - offset_dist_squared * 2.0
            bindex = np.argsort(values)[::-1]
            bindex = bindex[0:max_num]
            det = det[bindex, :]
            if kpss is not None:
                kpss = kpss[bindex, :]
        return det, kpss

    def nms(self, dets):
        thresh = self.nms_thresh
        x1 = dets[:, 0]
        y1 = dets[:, 1]
        x2 = dets[:, 2]
        y2 = dets[:, 3]
        scores = dets[:, 4]

        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.argsort()[::-1]

        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(i)
            xx1 = np.maximum(x1[i], x1[order[1:]])
            yy1 = np.maximum(y1[i], y1[order[1:]])
            xx2 = np.minimum(x2[i], x2[order[1:]])
            yy2 = np.minimum(y2[i], y2[order[1:]])

            w = np.maximum(0.0, xx2 - xx1 + 1)
            h = np.maximum(0.0, yy2 - yy1 + 1)
            inter = w * h
            ovr = inter / (areas[i] + areas[order[1:]] - inter)

            inds = np.where(ovr <= thresh)[0]
            order = order[inds + 1]

        return keep
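Most of RetinaFace.forward is bookkeeping; the core anchor-center/box-decoding step is easier to see standalone. A minimal sketch for a single feature map (the 4x4 size, stride 8, and the constant distance predictions are made up for illustration):

import numpy as np

# Grid of anchor centers for one stride, as built in RetinaFace.forward.
height, width, stride = 4, 4, 8
anchor_centers = np.stack(np.mgrid[:height, :width][::-1], axis=-1).astype(np.float32)
anchor_centers = (anchor_centers * stride).reshape((-1, 2))

# One fake (left, top, right, bottom) distance prediction per anchor,
# already multiplied by the stride as forward() does.
distance = np.full((anchor_centers.shape[0], 4), 10.0, dtype=np.float32)
x1 = anchor_centers[:, 0] - distance[:, 0]
y1 = anchor_centers[:, 1] - distance[:, 1]
x2 = anchor_centers[:, 0] + distance[:, 2]
y2 = anchor_centers[:, 1] + distance[:, 3]
bboxes = np.stack([x1, y1, x2, y2], axis=-1)
print(bboxes[0])  # [-10. -10.  10.  10.]: a 20x20 box around the first anchor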