Perfectfox256 committed
Commit fb49328 · verified · 1 Parent(s): c91e50c

upload y11 models and script

yolo-07-2025/eyes-detection-01-fp32.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:edaf66727e5e98a62a8d3602b46d8fdcc8d999c7cf64a7c8d139c13fd6a90f60
+ size 10457446
yolo-07-2025/eyes-detection-01.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b63370cdce9dac61417c40e7fb0da8753b3347c0e9d6fe67554c7a4c7f2bc79b
+ size 5272929
yolo-07-2025/hs-real-anime-y11n-320-fp16.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c6519f33a6032006867b96a0a8e9ca3069a900b9a2ba1a0670f7669c313d2d7b
+ size 5279134
yolo-07-2025/hs-real-anime-y11n-320-fp32.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8874ef5b98a60fee1579a1724665168c8334d424ed2d8344026dccfdf354d26e
+ size 10469501
yolo-07-2025/hs-real-anime-y11n-640-fp16.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e26ff81a9295755fac7568e8c1531e9757abf2bfbcac8f79c4526838e9e02572
+ size 5342193
yolo-07-2025/hs-real-anime-y11n-640-fp32.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fecf08b62cf091df6b46222e163ac1721570d5f0a9cffa93591e5bb46f61bccf
+ size 10595556
yolo-07-2025/hs-real-anime-y11s-320-fp16.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c2aeb9d199a9bdf0facb23ee8c878235347eb25e134b8162638dbfc5329a772
+ size 18946830
yolo-07-2025/hs-real-anime-y11s-320-fp32.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7980bf8f1732f16466bc6c5997db2c9685422045a021536697a0de7845940d1b
+ size 37804611
yolo-07-2025/hs-real-anime-y11s-640-fp16.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3f3e85afff182b7a4157e1fca37b61e63c7eee315a3f47a9112f2f9919c44c5
+ size 19009889
yolo-07-2025/hs-real-anime-y11s-640-fp32.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd2f67c628adb20ab2f1ffdd45c37015103f6fa6140e1a6a942dbbc3121a44a9
+ size 37930666
yolo-07-2025/hs-real-y11n-320-fp16.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:baeb88f900efef2b1b1d996ca8dc9bf28514f8db0d7ce6b3e94e1ea09cd12b9e
+ size 5279134
yolo-07-2025/hs-real-y11n-320-fp32.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39fd6000eeb729bfd5489f897f38eac6dca67c1008b320a7ed032a250e28b444
+ size 10469501
yolo-07-2025/hs-real-y11n-640-fp16.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c8439d90a684247cded62c1de4f081a16d6ddcf069c40fa2fc967a79ba99f09
+ size 5342193
yolo-07-2025/hs-real-y11n-640-fp32.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2b5adff81442762a93f78cfdad4b492471c92fa4e60fc6d553afffd0dc892d5
+ size 10595556
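
The file names appear to encode the export settings: y11n/y11s for the YOLO11 nano and small variants, 320/640 for the input resolution, and fp16/fp32 for the weight precision. A minimal sketch (not part of this commit) for confirming the actual input and output signature of any of these exports with onnxruntime; the chosen path is just one example from the list above:

import onnxruntime as ort

# Example path from this commit; any of the .onnx files above can be inspected the same way.
model_path = "yolo-07-2025/hs-real-anime-y11n-320-fp16.onnx"
sess = ort.InferenceSession(model_path, providers=["CPUExecutionProvider"])

for inp in sess.get_inputs():
    # For a 320 fp16 export this is expected to show something like [1, 3, 320, 320] and tensor(float16).
    print("input :", inp.name, inp.shape, inp.type)
for out in sess.get_outputs():
    print("output:", out.name, out.shape, out.type)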
yolo-07-2025/labelize-folder.py ADDED
@@ -0,0 +1,155 @@
+ import os
+ import cv2
+ import numpy as np
+ import onnxruntime as ort
+
+
+ def letterbox(image, new_shape=(320, 320), color=(114, 114, 114)):
+     """
+     Resize image to a target size with unchanged aspect ratio using padding.
+     Returns the resized image, and scaling/padding info to map boxes back.
+     """
+     shape = image.shape[:2]  # current shape [height, width]
+     r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
+     ratio = (r, r)
+     new_unpad = (int(round(shape[1] * r)), int(round(shape[0] * r)))
+     dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]
+     dw /= 2
+     dh /= 2
+
+     # resize image
+     resized = cv2.resize(image, new_unpad, interpolation=cv2.INTER_LINEAR)
+     # add border
+     top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
+     left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
+     padded = cv2.copyMakeBorder(resized, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
+     return padded, ratio, (dw, dh)
+
+
+ def preprocess(image, input_shape):
+     """
+     Preprocess image for model inference: letterbox, BGR to RGB, transpose, normalize.
+     """
+     img, ratio, (dw, dh) = letterbox(image, new_shape=input_shape)
+     img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+     img = img.astype(np.float32) / 255.0
+     img = img.transpose(2, 0, 1)
+     img = np.expand_dims(img, 0)
+     return img, ratio, (dw, dh)
+
+
+ def postprocess(prediction, image_shape, ratio, pad, score_threshold=0.2, iou_threshold=0.45):
+     """
+     Convert model output to bounding boxes, apply threshold and OpenCV NMS.
+     Returns list of (class_id, score, box) in original image coords.
+     """
+     pred = prediction[0]  # (4+nc, N)
+     nc = pred.shape[0] - 4
+     pred = pred.transpose(1, 0)  # (N, 4+nc)
+
+     # Extract boxes and scores
+     boxes = pred[:, :4]
+     scores_all = pred[:, 4:]
+     class_ids = np.argmax(scores_all, axis=1)
+     confidences = scores_all[np.arange(len(class_ids)), class_ids]
+
+     # Filter by confidence
+     mask = confidences > score_threshold
+     boxes = boxes[mask]
+     confidences = confidences[mask]
+     class_ids = class_ids[mask]
+     if boxes.size == 0:
+         return []
+
+     # Convert from [cx, cy, w, h] to [x1, y1, x2, y2]
+     dx, dy = pad
+     gain = ratio[0]
+     boxes_xyxy = np.zeros_like(boxes)
+     boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2] / 2  # x1
+     boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3] / 2  # y1
+     boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2] / 2  # x2
+     boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3] / 2  # y2
+     boxes_xyxy[:, [0, 2]] = (boxes_xyxy[:, [0, 2]] - dx) / gain
+     boxes_xyxy[:, [1, 3]] = (boxes_xyxy[:, [1, 3]] - dy) / gain
+
+     # Clip to image size
+     h0, w0 = image_shape
+     boxes_xyxy[:, [0, 2]] = boxes_xyxy[:, [0, 2]].clip(0, w0)
+     boxes_xyxy[:, [1, 3]] = boxes_xyxy[:, [1, 3]].clip(0, h0)
+
+     # Prepare for OpenCV NMSBoxes: convert to [x, y, w, h]
+     xywh = []
+     for x1, y1, x2, y2 in boxes_xyxy:
+         xywh.append([float(x1), float(y1), float(x2 - x1), float(y2 - y1)])
+
+     # Apply NMS
+     indices = cv2.dnn.NMSBoxes(xywh, confidences.tolist(), score_threshold, iou_threshold)
+     if len(indices) == 0:
+         return []
+     indices = indices.flatten()
+
+     results = []
+     for i in indices:
+         x1, y1, x2, y2 = boxes_xyxy[i]
+         cls = int(class_ids[i])
+         conf = float(confidences[i])
+         results.append((cls, conf, (x1, y1, x2, y2)))
+     return results
+
+
+ def save_labels(labels, image_shape, save_path):
+     """
+     Save labels in YOLO format: class_id, x_center, y_center, width, height (normalized).
+     """
+     h, w = image_shape
+     with open(save_path, 'w') as f:
+         for cls, conf, (x1, y1, x2, y2) in labels:
+             xc = (x1 + x2) / 2 / w
+             yc = (y1 + y2) / 2 / h
+             bw = (x2 - x1) / w
+             bh = (y2 - y1) / h
+             f.write(f"{cls} {xc:.6f} {yc:.6f} {bw:.6f} {bh:.6f}\n")
+
+
+ def labelize_folder(folder, session, display=False):
+     """
+     Process all images in a folder: run detection, save labels, optionally display images.
+     """
+     input_name = session.get_inputs()[0].name
+     H, W = session.get_inputs()[0].shape[2:]
+
+     for filename in os.listdir(folder):
+         if not (filename.lower().endswith('.jpg') or filename.lower().endswith('.png')):
+             continue
+         img_path = os.path.join(folder, filename)
+         image = cv2.imread(img_path)
+         if image is None:
+             continue
+
+         img_input, ratio, pad = preprocess(image, (H, W))
+         outputs = session.run(None, {input_name: img_input})[0]
+         labels = postprocess(outputs, image.shape[:2], ratio, pad)
+
+         txt_path = os.path.splitext(img_path)[0] + '.txt'
+         save_labels(labels, image.shape[:2], txt_path)
+
+         if display:
+             for cls, conf, (x1, y1, x2, y2) in labels:
+                 cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
+                 cv2.putText(image, f"{cls}:{conf:.2f}", (int(x1), int(y1) - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
+             cv2.imshow('Detections', image)
+             cv2.waitKey(0)
+             cv2.destroyAllWindows()
+
+
+ if __name__ == '__main__':
+     # Folder with images to process
+     folder = './dataset'
+     # Path to ONNX model
+     model_path = './hs-real-y11n-640-fp32.onnx'
+
+     # Create ONNX Runtime session
+     sess = ort.InferenceSession(model_path)
+
+     # Run labelization
+     labelize_folder(folder, sess, display=False)
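
For reference, save_labels writes one normalized "class x_center y_center width height" row per detection into a .txt file next to each image. A small sketch (not part of this commit) for reading such a file back into pixel boxes, i.e. the inverse of save_labels:

import cv2

def load_yolo_labels(txt_path, image_path):
    # Read YOLO-format rows back as (class_id, x1, y1, x2, y2) pixel boxes.
    h, w = cv2.imread(image_path).shape[:2]
    boxes = []
    with open(txt_path) as f:
        for line in f:
            cls, xc, yc, bw, bh = line.split()
            cls, xc, yc, bw, bh = int(cls), float(xc), float(yc), float(bw), float(bh)
            boxes.append((cls,
                          (xc - bw / 2) * w, (yc - bh / 2) * h,
                          (xc + bw / 2) * w, (yc + bh / 2) * h))
    return boxes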
yolo-07-2025/labels.txt ADDED
@@ -0,0 +1,16 @@
+ FEMALE_FACE
+ MALE_FACE
+ FEMALE_GENITALIA_COVERED
+ FEMALE_GENITALIA_EXPOSED
+ BUTTOCKS_COVERED
+ BUTTOCKS_EXPOSED
+ FEMALE_BREAST_COVERED
+ FEMALE_BREAST_EXPOSED
+ MALE_BREAST_EXPOSED
+ ARMPITS_EXPOSED
+ BELLY_EXPOSED
+ MALE_GENITALIA_EXPOSED
+ ANUS_EXPOSED
+ FEET_COVERED
+ FEET_EXPOSED
+ EYE
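
The class indices written by labelize-folder.py presumably map to this list in order (0 = FEMALE_FACE through 15 = EYE); the script itself only writes numeric IDs. A short sketch, assuming that ordering holds, for attaching names when displaying detections:

# Load the class names once; a detection's class_id indexes into this list.
with open("yolo-07-2025/labels.txt") as f:
    class_names = [line.strip() for line in f if line.strip()]

# e.g. in the display branch of labelize_folder:
# cv2.putText(image, f"{class_names[cls]}:{conf:.2f}", (int(x1), int(y1) - 5),
#             cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)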