echen01 committed on
Commit
c00162e
·
1 Parent(s): dd1add1

add face detector

Browse files
align_all_parallel.py ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ brief: face alignment with FFHQ method (https://github.com/NVlabs/ffhq-dataset)
3
+ author: lzhbrian (https://lzhbrian.me)
4
+ date: 2020.1.5
5
+ note: code is heavily borrowed from
6
+ https://github.com/NVlabs/ffhq-dataset
7
+ http://dlib.net/face_landmark_detection.py.html
8
+
9
+ requirements:
10
+ apt install cmake
11
+ conda install Pillow numpy scipy
12
+ pip install dlib
13
+ # download face landmark model from:
14
+ # http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
15
+ """
16
+ from argparse import ArgumentParser
17
+ import time
18
+ import numpy as np
19
+ import PIL
20
+ import PIL.Image
21
+ import os
22
+ import scipy
23
+ import scipy.ndimage
24
+ import dlib
25
+ import multiprocessing as mp
26
+ import math
27
+
28
+
29
+ SHAPE_PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"
30
+
31
+
32
def get_landmark(filepath, predictor, i=None):
    """Detect a face in an image and return its 68 dlib landmarks.

    :param filepath: path to the image file
    :param predictor: a dlib.shape_predictor (68-landmark model) instance
    :param i: index of the detected face to use; defaults to the last one
    :return: np.array of shape (68, 2), or None when no face is found
    """
    # Constructing the frontal-face detector is expensive; cache it on the
    # function object so repeated calls (e.g. over a whole directory of
    # images) build it only once.
    detector = getattr(get_landmark, "_detector", None)
    if detector is None:
        detector = dlib.get_frontal_face_detector()
        get_landmark._detector = detector

    img = dlib.load_rgb_image(filepath)
    dets = detector(img, 1)  # upsample once to catch smaller faces

    if i is None:
        i = len(dets) - 1  # default to the last detection
    try:
        shape = predictor(img, dets[i])
    except IndexError:
        # No face at the requested index (including the zero-detections
        # case, where i == -1). Callers must handle the None return.
        print("Face not found")
        return None
    # shape.parts() yields 68 dlib point objects; flatten to (x, y) rows.
    return np.array([[p.x, p.y] for p in shape.parts()])
55
+
56
+
57
def align_face(filepath, predictor, idx=None):
    """Align and crop a face the FFHQ way (github.com/NVlabs/ffhq-dataset).

    :param filepath: str, path to the input image
    :param predictor: dlib.shape_predictor for the 68-landmark model
    :param idx: which detected face to align; forwarded to get_landmark
    :return: 256x256 aligned PIL Image, or None when no face was detected
    """

    lm = get_landmark(filepath, predictor, i=idx)
    if lm is None:
        # No face found; surface the failure explicitly instead of
        # crashing on the landmark slicing below.
        return None

    # dlib 68-landmark layout: 0-16 chin, 17-21 left brow, 22-26 right brow,
    # 27-30 nose bridge, 31-35 nostrils, 36-41 left eye, 42-47 right eye,
    # 48-59 outer mouth, 60-67 inner mouth. Only the groups used for the
    # alignment geometry are extracted here.
    lm_eye_left = lm[36:42]  # left-clockwise
    lm_eye_right = lm[42:48]  # left-clockwise
    lm_mouth_outer = lm[48:60]  # left-clockwise

    # Calculate auxiliary vectors.
    eye_left = np.mean(lm_eye_left, axis=0)
    eye_right = np.mean(lm_eye_right, axis=0)
    eye_avg = (eye_left + eye_right) * 0.5
    eye_to_eye = eye_right - eye_left
    mouth_left = lm_mouth_outer[0]
    mouth_right = lm_mouth_outer[6]
    mouth_avg = (mouth_left + mouth_right) * 0.5
    eye_to_mouth = mouth_avg - eye_avg

    # Choose oriented crop rectangle (x: horizontal axis of the face,
    # y: perpendicular; scale chosen from eye/mouth distances).
    x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
    x /= np.hypot(*x)
    x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
    y = np.flipud(x) * [-1, 1]
    c = eye_avg + eye_to_mouth * 0.1
    quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
    qsize = np.hypot(*x) * 2

    # read image
    img = PIL.Image.open(filepath)

    output_size = 256
    transform_size = 256
    enable_padding = True

    # Shrink: pre-downscale very large sources before the fine transform.
    shrink = int(np.floor(qsize / output_size * 0.5))
    if shrink > 1:
        rsize = (
            int(np.rint(float(img.size[0]) / shrink)),
            int(np.rint(float(img.size[1]) / shrink)),
        )
        # PIL.Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the
        # same resampling filter under its canonical name.
        img = img.resize(rsize, PIL.Image.LANCZOS)
        quad /= shrink
        qsize /= shrink

    # Crop to the quad's bounding box (plus a safety border).
    border = max(int(np.rint(qsize * 0.1)), 3)
    crop = (
        int(np.floor(min(quad[:, 0]))),
        int(np.floor(min(quad[:, 1]))),
        int(np.ceil(max(quad[:, 0]))),
        int(np.ceil(max(quad[:, 1]))),
    )
    crop = (
        max(crop[0] - border, 0),
        max(crop[1] - border, 0),
        min(crop[2] + border, img.size[0]),
        min(crop[3] + border, img.size[1]),
    )
    if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
        img = img.crop(crop)
        quad -= crop[0:2]

    # Pad: when the quad extends past the image, reflect-pad and blur the
    # padded region so the transform has pixels to sample.
    pad = (
        int(np.floor(min(quad[:, 0]))),
        int(np.floor(min(quad[:, 1]))),
        int(np.ceil(max(quad[:, 0]))),
        int(np.ceil(max(quad[:, 1]))),
    )
    pad = (
        max(-pad[0] + border, 0),
        max(-pad[1] + border, 0),
        max(pad[2] - img.size[0] + border, 0),
        max(pad[3] - img.size[1] + border, 0),
    )
    if enable_padding and max(pad) > border - 4:
        pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
        img = np.pad(
            np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), "reflect"
        )
        h, w, _ = img.shape
        y, x, _ = np.ogrid[:h, :w, :1]
        mask = np.maximum(
            1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]),
            1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]),
        )
        blur = qsize * 0.02
        img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(
            mask * 3.0 + 1.0, 0.0, 1.0
        )
        img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
        img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), "RGB")
        quad += pad[:2]

    # Transform: map the oriented quad onto an axis-aligned square.
    img = img.transform(
        (transform_size, transform_size),
        PIL.Image.QUAD,
        (quad + 0.5).flatten(),
        PIL.Image.BILINEAR,
    )
    if output_size < transform_size:
        # ANTIALIAS removed in Pillow 10 -> LANCZOS (identical filter).
        img = img.resize((output_size, output_size), PIL.Image.LANCZOS)

    # Save aligned image.
    return img
174
+
175
+
176
def chunks(lst, n):
    """Break *lst* into consecutive pieces of *n* items each.

    Pieces are yielded lazily, in order; the final piece may be shorter
    than *n* when len(lst) is not a multiple of *n*.
    """
    total = len(lst)
    for begin in range(0, total, n):
        yield lst[begin : begin + n]
180
+
181
+
182
def extract_on_paths(file_paths):
    """Align every (source, result) pair in *file_paths*, saving the crops.

    Intended as a multiprocessing worker: each process loads its own shape
    predictor (dlib objects do not cross process boundaries). Processing is
    best-effort — a failure on one image is reported and skipped so a single
    bad file cannot abort the whole chunk.

    :param file_paths: list of (source_path, result_path) tuples
    """
    predictor = dlib.shape_predictor(SHAPE_PREDICTOR_PATH)
    pid = mp.current_process().name
    tot_count = len(file_paths)
    print(f"\t{pid} is starting to extract on #{tot_count} images")
    for count, (file_path, res_path) in enumerate(file_paths, start=1):
        if count % 100 == 0:
            print(f"{pid} done with {count}/{tot_count}")
        try:
            res = align_face(file_path, predictor)
            res = res.convert("RGB")
            os.makedirs(os.path.dirname(res_path), exist_ok=True)
            res.save(res_path)
        except Exception as err:
            # Still best-effort, but say which file failed and why instead
            # of swallowing the error silently.
            print(f"{pid} failed on {file_path}: {err}")
            continue
    print("\tDone!")
200
+
201
+
202
def parse_args(argv=None):
    """Parse command-line options.

    :param argv: optional list of argument strings; defaults to
        sys.argv[1:] (so the original no-argument call keeps working,
        while tests and callers can pass arguments explicitly).
    :return: argparse.Namespace with num_threads (int) and root_path (str)
    """
    parser = ArgumentParser(add_help=False)
    parser.add_argument("--num_threads", type=int, default=1)
    parser.add_argument("--root_path", type=str, default="")
    return parser.parse_args(argv)
208
+
209
+
210
def run(args):
    """Walk args.root_path and align every image into a parallel *_crops tree.

    Files with an existing .jpg result and .txt files are skipped, so the
    run is resumable. Work is split into roughly equal chunks, one per
    worker process.

    :param args: namespace with root_path (str) and num_threads (int)
    """
    root_path = args.root_path
    out_crops_path = root_path + "_crops"
    os.makedirs(out_crops_path, exist_ok=True)

    file_paths = []
    for root, dirs, files in os.walk(root_path):
        for file in files:
            file_path = os.path.join(root, file)
            fname = os.path.join(out_crops_path, os.path.relpath(file_path, root_path))
            res_path = f"{os.path.splitext(fname)[0]}.jpg"
            if os.path.splitext(file_path)[1] == ".txt" or os.path.exists(res_path):
                continue
            file_paths.append((file_path, res_path))

    if not file_paths:
        # Nothing new to process; the chunk-size computation below would
        # otherwise be 0 and make chunks() raise ValueError (zero step).
        print("No new files to process")
        return

    # Clamp to >= 1 so chunks() always gets a valid slice size.
    chunk_size = max(1, int(math.ceil(len(file_paths) / args.num_threads)))
    file_chunks = list(chunks(file_paths, chunk_size))
    print(len(file_chunks))
    print(f"Running on {len(file_paths)} paths\nHere we goooo")
    tic = time.time()
    # Context manager guarantees the worker processes are reclaimed even
    # when a worker raises (the original pool was never closed/joined).
    with mp.Pool(args.num_threads) as pool:
        pool.map(extract_on_paths, file_chunks)
    toc = time.time()
    print(f"Mischief managed in {str(toc - tic)}s")
236
+
237
+
238
if __name__ == "__main__":
    # Script entry point: parse CLI options and kick off the alignment run.
    run(parse_args())
app.py CHANGED
@@ -23,11 +23,14 @@ def run_alignment(image_path,idx=None):
23
  predictor = dlib.shape_predictor("pretrained_models/shape_predictor_68_face_landmarks.dat")
24
  aligned_image = align_face(filepath=image_path, predictor=predictor, idx=idx)
25
  print("Aligned image has shape: {}".format(aligned_image.size))
 
26
  return aligned_image
27
 
28
  def predict(inp):
29
  #with torch.no_grad():
30
- return inp
 
 
31
 
32
 
33
  gr.Interface(fn=predict,
 
23
  predictor = dlib.shape_predictor("pretrained_models/shape_predictor_68_face_landmarks.dat")
24
  aligned_image = align_face(filepath=image_path, predictor=predictor, idx=idx)
25
  print("Aligned image has shape: {}".format(aligned_image.size))
26
+
27
  return aligned_image
28
 
29
def predict(inp):
    """Gradio handler: persist the uploaded image and return the aligned face.

    :param inp: PIL Image supplied by the Gradio input component
    :return: aligned PIL Image produced by run_alignment
    """
    # Ensure the scratch directory exists: saving into a missing directory
    # raises FileNotFoundError on a fresh deployment.
    import os
    os.makedirs("imgs", exist_ok=True)
    inp.save("imgs/input.png")
    # idx=0: always align the first detected face.
    out = run_alignment("imgs/input.png", idx=0)
    return out
34
 
35
 
36
  gr.Interface(fn=predict,
pretrained_models/shape_predictor_68_face_landmarks.dat ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fbdc2cb80eb9aa7a758672cbfdda32ba6300efe9b6e6c7a299ff7e736b11b92f
3
+ size 99693937