jezzadebate committed on
Commit b5d999a · 1 Parent(s): 363c142

Initial commit

.gitignore ADDED
@@ -0,0 +1 @@
+ .DS_Store
Dockerfile ADDED
@@ -0,0 +1,17 @@
+ FROM python:3.9-slim-bullseye
+
+ RUN pip install --upgrade pip
+ RUN pip install facenet-pytorch
+
+ # Create working directory
+ RUN mkdir -p /usr/src/app
+ WORKDIR /usr/src/app
+
+ # Copy weights
+ ENV TORCH_HOME=/weights
+ COPY /weights /weights
+
+ # Copy source code
+ COPY face.py /usr/src/app
+
+ ENTRYPOINT ["python3", "/usr/src/app/face.py", "--input", "/in", "--output", "/out"]
face.py ADDED
@@ -0,0 +1,93 @@
+ import os
+ import argparse
+ from facenet_pytorch import MTCNN, InceptionResnetV1
+ from PIL import Image
+
+ # If required, create a face detection pipeline using MTCNN:
+ mtcnn = MTCNN(image_size=400, margin=150)
+
+ # Create an inception resnet (in eval mode):
+ resnet = InceptionResnetV1(pretrained='vggface2').eval()
+
+ def process(in_file, out_file, box=None):
+     img = Image.open(in_file)
+
+     if box is None:
+         boxes, probs = mtcnn.detect(img)
+
+         if boxes is None:
+             print("Face not found, using default box")
+             boxes = [[0, 0, img.size[0], img.size[0]]]
+         else:
+             boxes = sorted(zip(probs, boxes), key=lambda pb: pb[0], reverse=True)
+             boxes = [pb[1] for pb in boxes]
+
+         box = boxes[0]
+
+     img_pad = 25
+
+     box_l = int(box[0]) - img_pad
+     box_t = int(box[1]) - img_pad
+     box_r = int(box[2]) + img_pad
+     box_b = int(box[3]) + img_pad
+
+     # normalize box coordinates
+     box_l = max(0, box_l)
+     box_t = max(0, box_t)
+     box_r = min(img.size[0], box_r)
+     box_b = min(img.size[1], box_b)
+
+     # calculate box width and height
+     box_w = int(box_r - box_l)
+     box_h = int(box_b - box_t)
+
+     print("image size", img.size)
+     print("original box", (box_l, box_t, box_r, box_b))
+     print("original box size", box_w, "x", box_h)
+
+     # find the smaller dimension
+     box_d = min(box_w, box_h)
+
+     # adjust box coordinates to be square
+     box_l = int(box_l + (box_w - box_d) / 2)
+     box_t = int(box_t + (box_h - box_d) / 2)
+     box_r = int(box_l + box_d)
+     box_b = int(box_t + box_d)
+
+     box_w = int(box_r - box_l)
+     box_h = int(box_b - box_t)
+
+     print("adjusted box", (box_l, box_t, box_r, box_b))
+     print("adjusted size", box_w, "x", box_h)
+
+     im_new = img.crop((box_l, box_t, box_r, box_b)).resize((300, 300), Image.Resampling.LANCZOS)
+     im_new.save(out_file)
+
+ def auto_crop(input_dir, output_dir):
+     if not os.path.isdir(output_dir):
+         print("Error: output directory does not exist")
+         return
+     # iterate over all files in the input directory
+     if os.path.isdir(input_dir):
+         for file in os.listdir(input_dir):
+             try:
+                 in_file = os.path.join(input_dir, file)
+                 out_file = os.path.join(output_dir, file)
+                 print("Processing file", in_file)
+                 process(in_file, out_file)
+             except KeyboardInterrupt:
+                 raise
+             except Exception:
+                 print("Error processing file", file)
+     else:
+         path, file = os.path.split(input_dir)
+         print("Processing file", file)
+         out_file = os.path.join(output_dir, file)
+         process(input_dir, out_file)
+
+ if __name__ == '__main__':
+     parser = argparse.ArgumentParser(description="Batch Auto Cropping")
+     parser.add_argument('-i', '--input', help='Input folder', required=True)
+     parser.add_argument('-o', '--output', help='Output folder', required=True)
+     args = parser.parse_args()
+     auto_crop(args.input, args.output)
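Outside the container, the script can also be run directly; a brief usage sketch based on the argparse flags above (folder and file names are hypothetical):

# crop every image in a folder
python3 face.py --input ./photos --output ./cropped

# or crop a single image (auto_crop falls back to its single-file branch)
python3 face.py --input ./photos/portrait.jpg --output ./cropped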
weights/checkpoints/20180402-114759-vggface2.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:281cebca8662831adb987a874bdcb36e73f5b1c6dc5ee5878f305e985625d99b
+ size 111898327
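The checkpoint is committed as a Git LFS pointer, so the actual ~112 MB weights presumably need to be fetched before the Docker build copies /weights into the image; a hedged sketch, assuming Git LFS is available locally:

# fetch the real checkpoint behind the LFS pointer before building the image
git lfs install
git lfs pull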