padmanabhbosamia committed on
Commit 9204b05 · 1 Parent(s): 3fbb257

Upload 7 files

Files changed (7)
  1. Yolov3_Padmanabh.pth +3 -0
  2. app.py +201 -0
  3. config.py +215 -0
  4. grad_cam_func.py +150 -0
  5. loss.py +78 -0
  6. model.py +176 -0
  7. utils.py +577 -0
Yolov3_Padmanabh.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ab8201e7c32395ad51a303d8df752eddaf9cc3910c53c3833c03bccfd581773
+ size 246878895
app.py ADDED
@@ -0,0 +1,201 @@
+ import itertools
+
+ import config as config
+ import cv2
+ import gradio as gr
+ import matplotlib.patches as patches
+ import matplotlib.pyplot as plt
+ import numpy as np
+ import torch
+ import torchvision
+ import utils
+ from loss import YoloLoss
+ from model import YOLOv3
+ from PIL import Image
+ from torch.utils.data import DataLoader
+ from torchvision import transforms
+ from utils import get_loaders
+
+ # Remap the checkpoint keys (saved from a wrapper module) to the plain layer names
+ new_state_dict = {}
+ state_dict = torch.load('model/Yolov3_Padmanabh.pth', map_location=torch.device('cpu'))
+ for key, value in state_dict.items():
+     new_key = key.replace('model.', '')
+     new_state_dict[new_key] = value
+
+ model = YOLOv3(in_channels=3, num_classes=config.NUM_CLASSES)
+ model.load_state_dict(new_state_dict, strict=True)
+ model.eval()
+
+ classes = ("aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
+            "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
+            "pottedplant", "sheep", "sofa", "train", "tvmonitor")
+
+ import grad_cam_func as gcf
+ from pytorch_grad_cam.activations_and_gradients import ActivationsAndGradients
+ from pytorch_grad_cam.utils.image import show_cam_on_image
+ from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
+
+
+ def inference(input_img=None, iou_threshold=0.6, conf_threshold=0.5, gc_trans=0.3):
+     if input_img is not None:
+         transformed = config.infer_transforms(image=input_img)
+         transform_img = transformed['image'].unsqueeze(0)
+
+         transform_img_visual = config.infer_transforms_visualization(image=input_img)['image']
+
+         with torch.no_grad():
+             outputs = model(transform_img)
+             bboxes = [[] for _ in range(transform_img.shape[0])]  # one list per image in the batch
+
+             for i in range(3):
+                 batch_size, A, S, _, _ = outputs[i].shape
+                 anchor = np.array(config.SCALED_ANCHORS[i])
+                 boxes_scale_i = utils.cells_to_bboxes(
+                     outputs[i], anchor, S=S, is_preds=True)
+
+                 for idx, box in enumerate(boxes_scale_i):
+                     bboxes[idx] += box
+
+         nms_boxes = utils.non_max_suppression(bboxes[0], iou_threshold=iou_threshold,
+                                               threshold=conf_threshold, box_format="midpoint",)
+
+         image, boxes = transform_img_visual.permute(1, 2, 0), nms_boxes
+
+         # Plot the predicted bounding boxes on the image
+         cmap = plt.get_cmap("tab20b")
+         class_labels = config.PASCAL_CLASSES
+         colors = [cmap(i) for i in np.linspace(0, 1, len(class_labels))]
+
+         im = np.array(image)
+         height, width, _ = im.shape
+
+         # Create figure and axes
+         fig, ax = plt.subplots(1)
+
+         # Display the image
+         ax.imshow(im)
+
+         # box[0] is x midpoint, box[2] is width
+         # box[1] is y midpoint, box[3] is height
+
+         # Create a Rectangle patch for every surviving box
+         for box in boxes:
+             assert len(box) == 6, "box should contain class pred, confidence, x, y, width, height"
+             class_pred = box[0]
+             box = box[2:]
+             upper_left_x = box[0] - box[2] / 2
+             upper_left_y = box[1] - box[3] / 2
+             rect = patches.Rectangle(
+                 (upper_left_x * width, upper_left_y * height),
+                 box[2] * width,
+                 box[3] * height,
+                 linewidth=2,
+                 edgecolor=colors[int(class_pred)],
+                 facecolor="none",
+             )
+             # Add the patch to the Axes
+             ax.add_patch(rect)
+             plt.text(
+                 upper_left_x * width,
+                 upper_left_y * height,
+                 s=class_labels[int(class_pred)],
+                 color="white",
+                 verticalalignment="top",
+                 bbox={"color": colors[int(class_pred)], "pad": 0},
+             )
+
+         plt.axis('off')
+         fig.canvas.draw()
+
+         # Convert the rendered figure into a numpy image for Gradio
+         fig_img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
+         fig_img = fig_img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
+
+         plt.close(fig)
+
+         outputs_inference_bb = fig_img
+
+         ### GradCAM
+         target_layer = [model.layers[-2]]
+         cam = gcf.BaseCAM(model, target_layer)
+
+         AnG = ActivationsAndGradients(model, target_layer, None)
+         outputs = AnG(transform_img)
+
+         bboxes = [[] for _ in range(1)]
+         for i in range(3):
+             batch_size, A, S, _, _ = outputs[i].shape
+             anchor = config.SCALED_ANCHORS[i]
+             boxes_scale_i = utils.cells_to_bboxes(
+                 outputs[i], anchor, S=S, is_preds=True
+             )
+             for idx, box in enumerate(boxes_scale_i):
+                 bboxes[idx] += box
+
+         nms_boxes = utils.non_max_suppression(
+             bboxes[0], iou_threshold=0.5, threshold=0.4, box_format="midpoint",
+         )
+
+         target_categories = [box[0] for box in nms_boxes]
+         targets = [ClassifierOutputTarget(
+             category) for category in target_categories]
+
+         help_ = cam.compute_cam_per_layer(transform_img, targets, False)
+         output_gc = cam.aggregate_multi_layers(help_)[0, :, :]
+
+         # Overlay the CAM heatmap on the (resized) input image
+         img = cv2.resize(input_img, (416, 416))
+         img = np.float32(img) / 255
+         cam_image = show_cam_on_image(img, output_gc, use_rgb=True, image_weight=gc_trans)
+
+         outputs_inference_gc = cam_image
+
+     else:
+         outputs_inference_bb = None
+         outputs_inference_gc = None
+
+     return outputs_inference_bb, outputs_inference_gc
+
+
+ title = "PASCAL VOC trained on Yolov3"
+ description = "A simple Gradio interface to infer on a Yolov3 model and get GradCAM results"
+ examples = [['examples/test_' + str(i) + '.jpg', 0.6, 0.5, 0.3] for i in range(10)]
+
+ demo = gr.Interface(inference,
+                     inputs=[gr.Image(label="Input image"),
+                             gr.Slider(0, 1, value=0.6, label="IOU Threshold"),
+                             gr.Slider(0, 1, value=0.4, label="Confidence Threshold"),
+                             gr.Slider(0, 1, value=0.5, label="GradCAM Transparency"),
+                             ],
+                     outputs=[
+                         gr.Image(label="Yolov3 Prediction"),
+                         gr.Image(label="GradCAM Output"), ],
+                     title=title,
+                     description=description,
+                     examples=examples
+                     )
+ demo.launch()
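For reference, a minimal sketch of driving inference() without the Gradio UI. It assumes the function is already defined in the current session (importing app.py as a module would also call demo.launch()), and the image path is only a hypothetical example; gr.Image hands the function an RGB uint8 numpy array, hence the BGR-to-RGB conversion.

import cv2
# hypothetical example image; any RGB image works
img = cv2.cvtColor(cv2.imread("examples/test_0.jpg"), cv2.COLOR_BGR2RGB)
bb_img, cam_img = inference(img, iou_threshold=0.6, conf_threshold=0.5, gc_trans=0.3)
print(bb_img.shape, cam_img.shape)  # two H x W x 3 uint8 arrays (boxes figure, CAM overlay)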
config.py ADDED
@@ -0,0 +1,215 @@
+ import albumentations as A
+ import cv2
+ import torch
+ import os
+
+ from albumentations.pytorch import ToTensorV2
+ # from utils import seed_everything
+
+ DATASET = 'PASCAL_VOC'
+ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+ # seed_everything()  # If you want deterministic behavior
+
+ IN_CHANNELS = 3
+ NUM_WORKERS = os.cpu_count() - 2
+ BATCH_SIZE = 32
+ IMAGE_SIZE = 416
+ NUM_CLASSES = 20
+ LEARNING_RATE = 1e-5
+ MAX_LEARNING_RATE = 5e-4
+ WEIGHT_DECAY = 1e-4
+ NUM_EPOCHS = 100
+ CONF_THRESHOLD = 0.05
+ MAP_IOU_THRESH = 0.5
+ NMS_IOU_THRESH = 0.45
+ S = [IMAGE_SIZE // 32, IMAGE_SIZE // 16, IMAGE_SIZE // 8]
+ PIN_MEMORY = True
+ LOAD_MODEL = False
+ SAVE_MODEL = True
+ CHECKPOINT_FILE = "checkpoint.pth.tar"
+ IMG_DIR = DATASET + "/images/"
+ LABEL_DIR = DATASET + "/labels/"
+ MOSAIC_PROB_TRAIN = 0.75
+ MOSAIC_PROB_TEST = 0.
+
+ ANCHORS = [
+     [(0.28, 0.22), (0.38, 0.48), (0.9, 0.78)],
+     [(0.07, 0.15), (0.15, 0.11), (0.14, 0.29)],
+     [(0.02, 0.03), (0.04, 0.07), (0.08, 0.06)],
+ ]  # Note these have been rescaled to be between [0, 1]
+
+ means = [0.485, 0.456, 0.406]
+
+ scale = 1.1
+ train_transforms = A.Compose(
+     [
+         A.LongestMaxSize(max_size=int(IMAGE_SIZE * scale)),
+         A.PadIfNeeded(
+             min_height=int(IMAGE_SIZE * scale),
+             min_width=int(IMAGE_SIZE * scale),
+             border_mode=cv2.BORDER_CONSTANT,
+         ),
+         A.Rotate(limit=10, interpolation=1, border_mode=4),
+         A.RandomCrop(width=IMAGE_SIZE, height=IMAGE_SIZE),
+         A.ColorJitter(brightness=0.6, contrast=0.6, saturation=0.6, hue=0.6, p=0.4),
+         A.OneOf(
+             [
+                 A.ShiftScaleRotate(
+                     rotate_limit=20, p=0.5, border_mode=cv2.BORDER_CONSTANT
+                 ),
+                 # A.Affine(shear=15, p=0.5, mode="constant"),
+             ],
+             p=1.0,
+         ),
+         A.HorizontalFlip(p=0.5),
+         A.Blur(p=0.1),
+         A.CLAHE(p=0.1),
+         A.Posterize(p=0.1),
+         A.ToGray(p=0.1),
+         A.ChannelShuffle(p=0.05),
+         A.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255,),
+         ToTensorV2(),
+     ],
+     bbox_params=A.BboxParams(format="yolo", min_visibility=0.4, label_fields=[],),
+ )
+ test_transforms = A.Compose(
+     [
+         A.LongestMaxSize(max_size=IMAGE_SIZE),
+         A.PadIfNeeded(
+             min_height=IMAGE_SIZE, min_width=IMAGE_SIZE, border_mode=cv2.BORDER_CONSTANT
+         ),
+         A.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255,),
+         ToTensorV2(),
+     ],
+     bbox_params=A.BboxParams(format="yolo", min_visibility=0.4, label_fields=[]),
+ )
+
+ infer_transforms = A.Compose(
+     [
+         A.LongestMaxSize(max_size=IMAGE_SIZE),
+         A.PadIfNeeded(
+             min_height=IMAGE_SIZE, min_width=IMAGE_SIZE, border_mode=cv2.BORDER_CONSTANT
+         ),
+         A.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255,),
+         ToTensorV2(),
+     ])
+
+ infer_transforms_visualization = A.Compose(
+     [
+         A.LongestMaxSize(max_size=IMAGE_SIZE),
+         A.PadIfNeeded(
+             min_height=IMAGE_SIZE, min_width=IMAGE_SIZE, border_mode=cv2.BORDER_CONSTANT
+         ),
+         ToTensorV2(),
+     ])
+
+
+ SCALED_ANCHORS = (
+     torch.tensor(ANCHORS) * torch.tensor(S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)
+ )
+
+
+ PASCAL_CLASSES = [
+     "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
+     "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
+     "pottedplant", "sheep", "sofa", "train", "tvmonitor",
+ ]
+
+ COCO_LABELS = [
+     'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck',
+     'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench',
+     'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra',
+     'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+     'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
+     'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
+     'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
+     'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+     'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse',
+     'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+     'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+     'hair drier', 'toothbrush',
+ ]
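The SCALED_ANCHORS expression converts the [0, 1]-relative ANCHORS into grid-cell units, one row per prediction scale (13, 26 and 52 cells for a 416-pixel input). A minimal sketch to check it in isolation, assuming this config module is importable:

import torch
from config import ANCHORS, S
anchors = torch.tensor(ANCHORS)  # shape (3, 3, 2), fractions of the image size
scaled = anchors * torch.tensor(S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)
print(scaled.shape)              # torch.Size([3, 3, 2])
print(scaled[0, 0])              # tensor([3.6400, 2.8600]) = (0.28, 0.22) * 13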
grad_cam_func.py ADDED
@@ -0,0 +1,150 @@
+ import numpy as np
+ import torch
+ import ttach as tta
+ from typing import Callable, List, Tuple
+ from pytorch_grad_cam.activations_and_gradients import ActivationsAndGradients
+ from pytorch_grad_cam.utils.svd_on_activations import get_2d_projection
+ from pytorch_grad_cam.utils.image import scale_cam_image
+ from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
+ import pandas as pd
+
+ import config as config
+ import utils
+
+
+ class BaseCAM:
+     def __init__(self,
+                  model: torch.nn.Module,
+                  target_layers: List[torch.nn.Module],
+                  use_cuda: bool = False,
+                  reshape_transform: Callable = None,
+                  compute_input_gradient: bool = False,
+                  uses_gradients: bool = True) -> None:
+         self.model = model.eval()
+         self.target_layers = target_layers
+         self.cuda = use_cuda
+         if self.cuda:
+             self.model = model.cuda()
+         self.reshape_transform = reshape_transform
+         self.compute_input_gradient = compute_input_gradient
+         self.uses_gradients = uses_gradients
+         self.activations_and_grads = ActivationsAndGradients(
+             self.model, target_layers, reshape_transform)
+
+     """ Get a vector of weights for every channel in the target layer.
+         Methods that return weights channels,
+         will typically need to only implement this function. """
+
+     def get_cam_image(self,
+                       input_tensor: torch.Tensor,
+                       target_layer: torch.nn.Module,
+                       targets: List[torch.nn.Module],
+                       activations: torch.Tensor,
+                       grads: torch.Tensor,
+                       eigen_smooth: bool = False) -> np.ndarray:
+         return get_2d_projection(activations)
+
+     def forward(self,
+                 input_tensor: torch.Tensor,
+                 targets: List[torch.nn.Module],
+                 eigen_smooth: bool = False) -> np.ndarray:
+         if self.cuda:
+             input_tensor = input_tensor.cuda()
+
+         if self.compute_input_gradient:
+             input_tensor = torch.autograd.Variable(input_tensor,
+                                                    requires_grad=True)
+
+         outputs = self.activations_and_grads(input_tensor)
+
+         if targets is None:
+             # Build targets from the model's own NMS boxes
+             bboxes = [[] for _ in range(1)]
+             for i in range(3):
+                 batch_size, A, S, _, _ = outputs[i].shape
+                 anchor = config.SCALED_ANCHORS[i]
+                 boxes_scale_i = utils.cells_to_bboxes(
+                     outputs[i], anchor, S=S, is_preds=True
+                 )
+                 for idx, box in enumerate(boxes_scale_i):
+                     bboxes[idx] += box
+
+             nms_boxes = utils.non_max_suppression(
+                 bboxes[0], iou_threshold=0.5, threshold=0.4, box_format="midpoint",
+             )
+             # target_categories = np.argmax(outputs.cpu().data.numpy(), axis=-1)
+             target_categories = [box[0] for box in nms_boxes]
+             targets = [ClassifierOutputTarget(
+                 category) for category in target_categories]
+
+         if self.uses_gradients:
+             self.model.zero_grad()
+             loss = sum([target(output)
+                         for target, output in zip(targets, outputs)])
+             loss.backward(retain_graph=True)
+
+         # In most of the saliency attribution papers, the saliency is
+         # computed with a single target layer.
+         # Commonly it is the last convolutional layer.
+         # Here we support passing a list with multiple target layers.
+         # It will compute the saliency image for every image,
+         # and then aggregate them (with a default mean aggregation).
+         # This gives you more flexibility in case you just want to
+         # use all conv layers for example, all Batchnorm layers,
+         # or something else.
+         cam_per_layer = self.compute_cam_per_layer(input_tensor,
+                                                    targets,
+                                                    eigen_smooth)
+         return self.aggregate_multi_layers(cam_per_layer)
+
+     def get_target_width_height(self,
+                                 input_tensor: torch.Tensor) -> Tuple[int, int]:
+         width, height = input_tensor.size(-1), input_tensor.size(-2)
+         return width, height
+
+     def compute_cam_per_layer(
+             self,
+             input_tensor: torch.Tensor,
+             targets: List[torch.nn.Module],
+             eigen_smooth: bool) -> np.ndarray:
+         activations_list = [a.cpu().data.numpy()
+                             for a in self.activations_and_grads.activations]
+         grads_list = [g.cpu().data.numpy()
+                       for g in self.activations_and_grads.gradients]
+         target_size = self.get_target_width_height(input_tensor)
+
+         cam_per_target_layer = []
+         # Loop over the saliency image from every layer
+         for i in range(len(self.target_layers)):
+             target_layer = self.target_layers[i]
+             layer_activations = None
+             layer_grads = None
+             if i < len(activations_list):
+                 layer_activations = activations_list[i]
+             if i < len(grads_list):
+                 layer_grads = grads_list[i]
+
+             cam = self.get_cam_image(input_tensor,
+                                      target_layer,
+                                      targets,
+                                      layer_activations,
+                                      layer_grads,
+                                      eigen_smooth)
+             cam = np.maximum(cam, 0)
+             scaled = scale_cam_image(cam, target_size)
+             cam_per_target_layer.append(scaled[:, None, :])
+
+         return cam_per_target_layer
+
+     def aggregate_multi_layers(
+             self,
+             cam_per_target_layer: np.ndarray) -> np.ndarray:
+         cam_per_target_layer = np.concatenate(cam_per_target_layer, axis=1)
+         cam_per_target_layer = np.maximum(cam_per_target_layer, 0)
+         result = np.mean(cam_per_target_layer, axis=1)
+         return scale_cam_image(result)
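A minimal sketch (illustration only) of how app.py drives this class: the hooks registered by ActivationsAndGradients capture the target layer's activations during a forward pass, and the CAM is the SVD projection of those activations (get_2d_projection), so no backward pass is required. The random tensor stands in for an image produced by config.infer_transforms.

import torch
import config
from model import YOLOv3
from grad_cam_func import BaseCAM

model = YOLOv3(num_classes=config.NUM_CLASSES).eval()
cam = BaseCAM(model, target_layers=[model.layers[-2]])
x = torch.randn(1, 3, 416, 416)                       # stand-in for a transformed image
_ = cam.activations_and_grads(x)                      # forward pass; hooks record activations
per_layer = cam.compute_cam_per_layer(x, targets=None, eigen_smooth=False)
heatmap = cam.aggregate_multi_layers(per_layer)[0]    # (416, 416) array scaled to [0, 1]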
loss.py ADDED
@@ -0,0 +1,78 @@
+ """
+ Implementation of the YOLO loss function, similar to the one in the YOLOv3 paper.
+ The difference, as far as I can tell, is that CrossEntropy is used for the classes
+ instead of BinaryCrossEntropy.
+ """
+ import random
+ import torch
+ import torch.nn as nn
+ from utils import intersection_over_union
+
+
+ class YoloLoss(nn.Module):
+     def __init__(self):
+         super().__init__()
+         self.mse = nn.MSELoss()
+         self.bce = nn.BCEWithLogitsLoss()
+         self.entropy = nn.CrossEntropyLoss()
+         self.sigmoid = nn.Sigmoid()
+
+         # Constants signifying how much to pay for each respective part of the loss
+         self.lambda_class = 1
+         self.lambda_noobj = 10
+         self.lambda_obj = 1
+         self.lambda_box = 10
+
+     def forward(self, predictions, target, anchors):
+         # Check where obj and noobj (we ignore if target == -1)
+         obj = target[..., 0] == 1  # in paper this is Iobj_i
+         noobj = target[..., 0] == 0  # in paper this is Inoobj_i
+
+         # ======================= #
+         #   FOR NO OBJECT LOSS    #
+         # ======================= #
+
+         no_object_loss = self.bce(
+             (predictions[..., 0:1][noobj]), (target[..., 0:1][noobj]),
+         )
+
+         # ==================== #
+         #    FOR OBJECT LOSS   #
+         # ==================== #
+
+         anchors = anchors.reshape(1, 3, 1, 1, 2)
+         box_preds = torch.cat([self.sigmoid(predictions[..., 1:3]), torch.exp(predictions[..., 3:5]) * anchors], dim=-1)
+         ious = intersection_over_union(box_preds[obj], target[..., 1:5][obj]).detach()
+         object_loss = self.mse(self.sigmoid(predictions[..., 0:1][obj]), ious * target[..., 0:1][obj])
+
+         # ======================== #
+         #   FOR BOX COORDINATES    #
+         # ======================== #
+
+         predictions[..., 1:3] = self.sigmoid(predictions[..., 1:3])  # x, y coordinates
+         target[..., 3:5] = torch.log(
+             (1e-16 + target[..., 3:5] / anchors)
+         )  # width, height coordinates
+         box_loss = self.mse(predictions[..., 1:5][obj], target[..., 1:5][obj])
+
+         # ================== #
+         #   FOR CLASS LOSS   #
+         # ================== #
+
+         class_loss = self.entropy(
+             (predictions[..., 5:][obj]), (target[..., 5][obj].long()),
+         )
+
+         # print("__________________________________")
+         # print(self.lambda_box * box_loss)
+         # print(self.lambda_obj * object_loss)
+         # print(self.lambda_noobj * no_object_loss)
+         # print(self.lambda_class * class_loss)
+         # print("\n")
+
+         return (
+             self.lambda_box * box_loss
+             + self.lambda_obj * object_loss
+             + self.lambda_noobj * no_object_loss
+             + self.lambda_class * class_loss
+         )
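The loss is computed once per prediction scale, with the anchors that belong to that scale. A minimal sketch of how it would typically be combined during training, under the assumption (not shown in this upload) that the dataset returns one target tensor per grid size in config.S:

import config
from loss import YoloLoss

loss_fn = YoloLoss()

def compute_loss(outputs, y):
    # outputs: list of 3 tensors from YOLOv3, shapes (N, 3, S, S, num_classes + 5)
    # y: tuple of 3 matching target tensors, one per scale
    return (
        loss_fn(outputs[0], y[0], config.SCALED_ANCHORS[0])
        + loss_fn(outputs[1], y[1], config.SCALED_ANCHORS[1])
        + loss_fn(outputs[2], y[2], config.SCALED_ANCHORS[2])
    )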
model.py ADDED
@@ -0,0 +1,176 @@
+ """
+ Implementation of YOLOv3 architecture
+ """
+
+ import torch
+ import torch.nn as nn
+
+ """
+ Information about architecture config:
+ Tuple is structured by (filters, kernel_size, stride).
+ Every conv is a same convolution.
+ List is structured by "B" indicating a residual block followed by the number of repeats.
+ "S" is for scale prediction block and computing the yolo loss.
+ "U" is for upsampling the feature map and concatenating with a previous layer.
+ """
+ config = [
+     (32, 3, 1),
+     (64, 3, 2),
+     ["B", 1],
+     (128, 3, 2),
+     ["B", 2],
+     (256, 3, 2),
+     ["B", 8],
+     (512, 3, 2),
+     ["B", 8],
+     (1024, 3, 2),
+     ["B", 4],  # To this point is Darknet-53
+     (512, 1, 1),
+     (1024, 3, 1),
+     "S",
+     (256, 1, 1),
+     "U",
+     (256, 1, 1),
+     (512, 3, 1),
+     "S",
+     (128, 1, 1),
+     "U",
+     (128, 1, 1),
+     (256, 3, 1),
+     "S",
+ ]
+
+
+ class CNNBlock(nn.Module):
+     def __init__(self, in_channels, out_channels, bn_act=True, **kwargs):
+         super().__init__()
+         self.conv = nn.Conv2d(in_channels, out_channels, bias=not bn_act, **kwargs)
+         self.bn = nn.BatchNorm2d(out_channels)
+         self.leaky = nn.LeakyReLU(0.1)
+         self.use_bn_act = bn_act
+
+     def forward(self, x):
+         if self.use_bn_act:
+             return self.leaky(self.bn(self.conv(x)))
+         else:
+             return self.conv(x)
+
+
+ class ResidualBlock(nn.Module):
+     def __init__(self, channels, use_residual=True, num_repeats=1):
+         super().__init__()
+         self.layers = nn.ModuleList()
+         for repeat in range(num_repeats):
+             self.layers += [
+                 nn.Sequential(
+                     CNNBlock(channels, channels // 2, kernel_size=1),
+                     CNNBlock(channels // 2, channels, kernel_size=3, padding=1),
+                 )
+             ]
+
+         self.use_residual = use_residual
+         self.num_repeats = num_repeats
+
+     def forward(self, x):
+         for layer in self.layers:
+             if self.use_residual:
+                 x = x + layer(x)
+             else:
+                 x = layer(x)
+
+         return x
+
+
+ class ScalePrediction(nn.Module):
+     def __init__(self, in_channels, num_classes):
+         super().__init__()
+         self.pred = nn.Sequential(
+             CNNBlock(in_channels, 2 * in_channels, kernel_size=3, padding=1),
+             CNNBlock(
+                 2 * in_channels, (num_classes + 5) * 3, bn_act=False, kernel_size=1
+             ),
+         )
+         self.num_classes = num_classes
+
+     def forward(self, x):
+         return (
+             self.pred(x)
+             .reshape(x.shape[0], 3, self.num_classes + 5, x.shape[2], x.shape[3])
+             .permute(0, 1, 3, 4, 2)
+         )
+
+
+ class YOLOv3(nn.Module):
+     def __init__(self, in_channels=3, num_classes=80):
+         super().__init__()
+         self.num_classes = num_classes
+         self.in_channels = in_channels
+         self.layers = self._create_conv_layers()
+
+     def forward(self, x):
+         outputs = []  # for each scale
+         route_connections = []
+         for layer in self.layers:
+             if isinstance(layer, ScalePrediction):
+                 outputs.append(layer(x))
+                 continue
+
+             x = layer(x)
+
+             if isinstance(layer, ResidualBlock) and layer.num_repeats == 8:
+                 route_connections.append(x)
+
+             elif isinstance(layer, nn.Upsample):
+                 x = torch.cat([x, route_connections[-1]], dim=1)
+                 route_connections.pop()
+
+         return outputs
+
+     def _create_conv_layers(self):
+         layers = nn.ModuleList()
+         in_channels = self.in_channels
+
+         for module in config:
+             if isinstance(module, tuple):
+                 out_channels, kernel_size, stride = module
+                 layers.append(
+                     CNNBlock(
+                         in_channels,
+                         out_channels,
+                         kernel_size=kernel_size,
+                         stride=stride,
+                         padding=1 if kernel_size == 3 else 0,
+                     )
+                 )
+                 in_channels = out_channels
+
+             elif isinstance(module, list):
+                 num_repeats = module[1]
+                 layers.append(ResidualBlock(in_channels, num_repeats=num_repeats,))
+
+             elif isinstance(module, str):
+                 if module == "S":
+                     layers += [
+                         ResidualBlock(in_channels, use_residual=False, num_repeats=1),
+                         CNNBlock(in_channels, in_channels // 2, kernel_size=1),
+                         ScalePrediction(in_channels // 2, num_classes=self.num_classes),
+                     ]
+                     in_channels = in_channels // 2
+
+                 elif module == "U":
+                     layers.append(nn.Upsample(scale_factor=2),)
+                     in_channels = in_channels * 3
+
+         return layers
+
+
+ if __name__ == "__main__":
+     num_classes = 20
+     IMAGE_SIZE = 416
+     model = YOLOv3(num_classes=num_classes)
+     x = torch.randn((2, 3, IMAGE_SIZE, IMAGE_SIZE))
+     out = model(x)
+     assert out[0].shape == (2, 3, IMAGE_SIZE // 32, IMAGE_SIZE // 32, num_classes + 5)
+     assert out[1].shape == (2, 3, IMAGE_SIZE // 16, IMAGE_SIZE // 16, num_classes + 5)
+     assert out[2].shape == (2, 3, IMAGE_SIZE // 8, IMAGE_SIZE // 8, num_classes + 5)
+     print("Success!")
utils.py ADDED
@@ -0,0 +1,577 @@
+ import config
+ import matplotlib.pyplot as plt
+ import matplotlib.patches as patches
+ import numpy as np
+ import os
+ import random
+ import torch
+
+ from collections import Counter
+ from torch.utils.data import DataLoader
+ from tqdm import tqdm
+
+
+ def iou_width_height(boxes1, boxes2):
+     """
+     Parameters:
+         boxes1 (tensor): width and height of the first bounding boxes
+         boxes2 (tensor): width and height of the second bounding boxes
+     Returns:
+         tensor: Intersection over union of the corresponding boxes
+     """
+     intersection = torch.min(boxes1[..., 0], boxes2[..., 0]) * torch.min(
+         boxes1[..., 1], boxes2[..., 1]
+     )
+     union = (
+         boxes1[..., 0] * boxes1[..., 1] + boxes2[..., 0] * boxes2[..., 1] - intersection
+     )
+     return intersection / union
+
+
+ def intersection_over_union(boxes_preds, boxes_labels, box_format="midpoint"):
+     """
+     Video explanation of this function:
+     https://youtu.be/XXYG5ZWtjj0
+     This function calculates intersection over union (iou) given pred boxes
+     and target boxes.
+     Parameters:
+         boxes_preds (tensor): Predictions of Bounding Boxes (BATCH_SIZE, 4)
+         boxes_labels (tensor): Correct labels of Bounding Boxes (BATCH_SIZE, 4)
+         box_format (str): midpoint/corners, if boxes (x,y,w,h) or (x1,y1,x2,y2)
+     Returns:
+         tensor: Intersection over union for all examples
+     """
+     if box_format == "midpoint":
+         box1_x1 = boxes_preds[..., 0:1] - boxes_preds[..., 2:3] / 2
+         box1_y1 = boxes_preds[..., 1:2] - boxes_preds[..., 3:4] / 2
+         box1_x2 = boxes_preds[..., 0:1] + boxes_preds[..., 2:3] / 2
+         box1_y2 = boxes_preds[..., 1:2] + boxes_preds[..., 3:4] / 2
+         box2_x1 = boxes_labels[..., 0:1] - boxes_labels[..., 2:3] / 2
+         box2_y1 = boxes_labels[..., 1:2] - boxes_labels[..., 3:4] / 2
+         box2_x2 = boxes_labels[..., 0:1] + boxes_labels[..., 2:3] / 2
+         box2_y2 = boxes_labels[..., 1:2] + boxes_labels[..., 3:4] / 2
+
+     if box_format == "corners":
+         box1_x1 = boxes_preds[..., 0:1]
+         box1_y1 = boxes_preds[..., 1:2]
+         box1_x2 = boxes_preds[..., 2:3]
+         box1_y2 = boxes_preds[..., 3:4]
+         box2_x1 = boxes_labels[..., 0:1]
+         box2_y1 = boxes_labels[..., 1:2]
+         box2_x2 = boxes_labels[..., 2:3]
+         box2_y2 = boxes_labels[..., 3:4]
+
+     x1 = torch.max(box1_x1, box2_x1)
+     y1 = torch.max(box1_y1, box2_y1)
+     x2 = torch.min(box1_x2, box2_x2)
+     y2 = torch.min(box1_y2, box2_y2)
+
+     intersection = (x2 - x1).clamp(0) * (y2 - y1).clamp(0)
+     box1_area = abs((box1_x2 - box1_x1) * (box1_y2 - box1_y1))
+     box2_area = abs((box2_x2 - box2_x1) * (box2_y2 - box2_y1))
+
+     return intersection / (box1_area + box2_area - intersection + 1e-6)
+
+
+ def non_max_suppression(bboxes, iou_threshold, threshold, box_format="corners"):
+     """
+     Video explanation of this function:
+     https://youtu.be/YDkjWEN8jNA
+     Does Non Max Suppression given bboxes
+     Parameters:
+         bboxes (list): list of lists containing all bboxes with each bboxes
+             specified as [class_pred, prob_score, x1, y1, x2, y2]
+         iou_threshold (float): threshold where predicted bboxes is correct
+         threshold (float): threshold to remove predicted bboxes (independent of IoU)
+         box_format (str): "midpoint" or "corners" used to specify bboxes
+     Returns:
+         list: bboxes after performing NMS given a specific IoU threshold
+     """
+     assert type(bboxes) == list
+
+     bboxes = [box for box in bboxes if box[1] > threshold]
+     bboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)
+     bboxes_after_nms = []
+
+     while bboxes:
+         chosen_box = bboxes.pop(0)
+
+         bboxes = [
+             box
+             for box in bboxes
+             if box[0] != chosen_box[0]
+             or intersection_over_union(
+                 torch.tensor(chosen_box[2:]),
+                 torch.tensor(box[2:]),
+                 box_format=box_format,
+             )
+             < iou_threshold
+         ]
+
+         bboxes_after_nms.append(chosen_box)
+
+     return bboxes_after_nms
+
+
+ def mean_average_precision(
+     pred_boxes, true_boxes, iou_threshold=0.5, box_format="midpoint", num_classes=20
+ ):
+     """
+     Video explanation of this function:
+     https://youtu.be/FppOzcDvaDI
+     This function calculates mean average precision (mAP)
+     Parameters:
+         pred_boxes (list): list of lists containing all bboxes with each bboxes
+             specified as [train_idx, class_prediction, prob_score, x1, y1, x2, y2]
+         true_boxes (list): Similar as pred_boxes except all the correct ones
+         iou_threshold (float): threshold where predicted bboxes is correct
+         box_format (str): "midpoint" or "corners" used to specify bboxes
+         num_classes (int): number of classes
+     Returns:
+         float: mAP value across all classes given a specific IoU threshold
+     """
+     # list storing all AP for respective classes
+     average_precisions = []
+
+     # used for numerical stability later on
+     epsilon = 1e-6
+
+     for c in range(num_classes):
+         detections = []
+         ground_truths = []
+
+         # Go through all predictions and targets,
+         # and only add the ones that belong to the
+         # current class c
+         for detection in pred_boxes:
+             if detection[1] == c:
+                 detections.append(detection)
+
+         for true_box in true_boxes:
+             if true_box[1] == c:
+                 ground_truths.append(true_box)
+
+         # find the amount of bboxes for each training example
+         # Counter here finds how many ground truth bboxes we get
+         # for each training example, so let's say img 0 has 3,
+         # img 1 has 5 then we will obtain a dictionary with:
+         # amount_bboxes = {0:3, 1:5}
+         amount_bboxes = Counter([gt[0] for gt in ground_truths])
+
+         # We then go through each key, val in this dictionary
+         # and convert to the following (w.r.t same example):
+         # amount_bboxes = {0:torch.tensor[0,0,0], 1:torch.tensor[0,0,0,0,0]}
+         for key, val in amount_bboxes.items():
+             amount_bboxes[key] = torch.zeros(val)
+
+         # sort by box probabilities which is index 2
+         detections.sort(key=lambda x: x[2], reverse=True)
+         TP = torch.zeros((len(detections)))
+         FP = torch.zeros((len(detections)))
+         total_true_bboxes = len(ground_truths)
+
+         # If none exists for this class then we can safely skip
+         if total_true_bboxes == 0:
+             continue
+
+         for detection_idx, detection in enumerate(detections):
+             # Only take out the ground_truths that have the same
+             # training idx as detection
+             ground_truth_img = [
+                 bbox for bbox in ground_truths if bbox[0] == detection[0]
+             ]
+
+             num_gts = len(ground_truth_img)
+             best_iou = 0
+
+             for idx, gt in enumerate(ground_truth_img):
+                 iou = intersection_over_union(
+                     torch.tensor(detection[3:]),
+                     torch.tensor(gt[3:]),
+                     box_format=box_format,
+                 )
+
+                 if iou > best_iou:
+                     best_iou = iou
+                     best_gt_idx = idx
+
+             if best_iou > iou_threshold:
+                 # only detect ground truth detection once
+                 if amount_bboxes[detection[0]][best_gt_idx] == 0:
+                     # true positive and add this bounding box to seen
+                     TP[detection_idx] = 1
+                     amount_bboxes[detection[0]][best_gt_idx] = 1
+                 else:
+                     FP[detection_idx] = 1
+
+             # if IOU is lower then the detection is a false positive
+             else:
+                 FP[detection_idx] = 1
+
+         TP_cumsum = torch.cumsum(TP, dim=0)
+         FP_cumsum = torch.cumsum(FP, dim=0)
+         recalls = TP_cumsum / (total_true_bboxes + epsilon)
+         precisions = TP_cumsum / (TP_cumsum + FP_cumsum + epsilon)
+         precisions = torch.cat((torch.tensor([1]), precisions))
+         recalls = torch.cat((torch.tensor([0]), recalls))
+         # torch.trapz for numerical integration
+         average_precisions.append(torch.trapz(precisions, recalls))
+
+     return sum(average_precisions) / len(average_precisions)
+
+
+ def plot_image(image, boxes):
+     """Plots predicted bounding boxes on the image"""
+     cmap = plt.get_cmap("tab20b")
+     class_labels = config.COCO_LABELS if config.DATASET == 'COCO' else config.PASCAL_CLASSES
+     colors = [cmap(i) for i in np.linspace(0, 1, len(class_labels))]
+     im = np.array(image)
+     height, width, _ = im.shape
+
+     # Create figure and axes
+     fig, ax = plt.subplots(1)
+     # Display the image
+     ax.imshow(im)
+
+     # box[0] is x midpoint, box[2] is width
+     # box[1] is y midpoint, box[3] is height
+
+     # Create a Rectangle patch
+     for box in boxes:
+         assert len(box) == 6, "box should contain class pred, confidence, x, y, width, height"
+         class_pred = box[0]
+         box = box[2:]
+         upper_left_x = box[0] - box[2] / 2
+         upper_left_y = box[1] - box[3] / 2
+         rect = patches.Rectangle(
+             (upper_left_x * width, upper_left_y * height),
+             box[2] * width,
+             box[3] * height,
+             linewidth=2,
+             edgecolor=colors[int(class_pred)],
+             facecolor="none",
+         )
+         # Add the patch to the Axes
+         ax.add_patch(rect)
+         plt.text(
+             upper_left_x * width,
+             upper_left_y * height,
+             s=class_labels[int(class_pred)],
+             color="white",
+             verticalalignment="top",
+             bbox={"color": colors[int(class_pred)], "pad": 0},
+         )
+     plt.axis('off')
+     plt.show()
+
+
+ def get_evaluation_bboxes(
+     loader,
+     model,
+     iou_threshold,
+     anchors,
+     threshold,
+     box_format="midpoint",
+     device="cuda",
+ ):
+     # make sure model is in eval before getting bboxes
+     model.eval()
+     train_idx = 0
+     all_pred_boxes = []
+     all_true_boxes = []
+     for batch_idx, (x, labels) in enumerate(tqdm(loader)):
+         x = x.to(device)
+
+         with torch.no_grad():
+             predictions = model(x)
+
+         batch_size = x.shape[0]
+         bboxes = [[] for _ in range(batch_size)]
+         for i in range(3):
+             S = predictions[i].shape[2]
+             anchor = torch.tensor([*anchors[i]]).to(device) * S
+             boxes_scale_i = cells_to_bboxes(
+                 predictions[i], anchor, S=S, is_preds=True
+             )
+             for idx, box in enumerate(boxes_scale_i):
+                 bboxes[idx] += box
+
+         # we just want one bbox for each label, not one for each scale
+         true_bboxes = cells_to_bboxes(
+             labels[2], anchor, S=S, is_preds=False
+         )
+
+         for idx in range(batch_size):
+             nms_boxes = non_max_suppression(
+                 bboxes[idx],
+                 iou_threshold=iou_threshold,
+                 threshold=threshold,
+                 box_format=box_format,
+             )
+
+             for nms_box in nms_boxes:
+                 all_pred_boxes.append([train_idx] + nms_box)
+
+             for box in true_bboxes[idx]:
+                 if box[1] > threshold:
+                     all_true_boxes.append([train_idx] + box)
+
+             train_idx += 1
+
+     model.train()
+     return all_pred_boxes, all_true_boxes
+
+
+ def cells_to_bboxes(predictions, anchors, S, is_preds=True):
+     """
+     Scales the predictions coming from the model to be relative to the entire
+     image, so that they can, for example, be plotted or evaluated later.
+     INPUT:
+         predictions: tensor of size (N, 3, S, S, num_classes+5)
+         anchors: the anchors used for the predictions
+         S: the number of cells the image is divided in on the width (and height)
+         is_preds: whether the input is predictions or the true bounding boxes
+     OUTPUT:
+         converted_bboxes: the converted boxes of size (N, num_anchors * S * S, 6)
+             with class index, object score and bounding box coordinates
+     """
+     BATCH_SIZE = predictions.shape[0]
+     num_anchors = len(anchors)
+     box_predictions = predictions[..., 1:5]
+     if is_preds:
+         anchors = anchors.reshape(1, len(anchors), 1, 1, 2)
+         box_predictions[..., 0:2] = torch.sigmoid(box_predictions[..., 0:2])
+         box_predictions[..., 2:] = torch.exp(box_predictions[..., 2:]) * anchors
+         scores = torch.sigmoid(predictions[..., 0:1])
+         best_class = torch.argmax(predictions[..., 5:], dim=-1).unsqueeze(-1)
+     else:
+         scores = predictions[..., 0:1]
+         best_class = predictions[..., 5:6]
+
+     cell_indices = (
+         torch.arange(S)
+         .repeat(predictions.shape[0], 3, S, 1)
+         .unsqueeze(-1)
+         .to(predictions.device)
+     )
+     x = 1 / S * (box_predictions[..., 0:1] + cell_indices)
+     y = 1 / S * (box_predictions[..., 1:2] + cell_indices.permute(0, 1, 3, 2, 4))
+     w_h = 1 / S * box_predictions[..., 2:4]
+     converted_bboxes = torch.cat((best_class, scores, x, y, w_h), dim=-1).reshape(BATCH_SIZE, num_anchors * S * S, 6)
+     return converted_bboxes.tolist()
+
+
+ def check_class_accuracy(model, loader, threshold):
+     model.eval()
+     tot_class_preds, correct_class = 0, 0
+     tot_noobj, correct_noobj = 0, 0
+     tot_obj, correct_obj = 0, 0
+
+     for idx, (x, y) in enumerate(tqdm(loader)):
+         x = x.to(config.DEVICE)
+         with torch.no_grad():
+             out = model(x)
+
+         for i in range(3):
+             y[i] = y[i].to(config.DEVICE)
+             obj = y[i][..., 0] == 1  # in paper this is Iobj_i
+             noobj = y[i][..., 0] == 0  # in paper this is Inoobj_i
+
+             correct_class += torch.sum(
+                 torch.argmax(out[i][..., 5:][obj], dim=-1) == y[i][..., 5][obj]
+             )
+             tot_class_preds += torch.sum(obj)
+
+             obj_preds = torch.sigmoid(out[i][..., 0]) > threshold
+             correct_obj += torch.sum(obj_preds[obj] == y[i][..., 0][obj])
+             tot_obj += torch.sum(obj)
+             correct_noobj += torch.sum(obj_preds[noobj] == y[i][..., 0][noobj])
+             tot_noobj += torch.sum(noobj)
+
+     # print(f"Class accuracy is: {(correct_class/(tot_class_preds+1e-16))*100:2f}%")
+     # print(f"No obj accuracy is: {(correct_noobj/(tot_noobj+1e-16))*100:2f}%")
+     # print(f"Obj accuracy is: {(correct_obj/(tot_obj+1e-16))*100:2f}%")
+     model.train()
+     class_acc = (correct_class / (tot_class_preds + 1e-16)) * 100
+     no_obj_acc = (correct_noobj / (tot_noobj + 1e-16)) * 100
+     obj_acc = (correct_obj / (tot_obj + 1e-16)) * 100
+     return class_acc, no_obj_acc, obj_acc
+
+
+ def get_mean_std(loader):
+     # var[X] = E[X**2] - E[X]**2
+     channels_sum, channels_sqrd_sum, num_batches = 0, 0, 0
+
+     for data, _ in tqdm(loader):
+         channels_sum += torch.mean(data, dim=[0, 2, 3])
+         channels_sqrd_sum += torch.mean(data ** 2, dim=[0, 2, 3])
+         num_batches += 1
+
+     mean = channels_sum / num_batches
+     std = (channels_sqrd_sum / num_batches - mean ** 2) ** 0.5
+
+     return mean, std
+
+
+ def save_checkpoint(model, optimizer, filename="my_checkpoint.pth.tar"):
+     print("=> Saving checkpoint")
+     checkpoint = {
+         "state_dict": model.state_dict(),
+         "optimizer": optimizer.state_dict(),
+     }
+     torch.save(checkpoint, filename)
+
+
+ def load_checkpoint(checkpoint_file, model, optimizer, lr):
+     print("=> Loading checkpoint")
+     checkpoint = torch.load(checkpoint_file, map_location=config.DEVICE)
+     model.load_state_dict(checkpoint["state_dict"])
+     optimizer.load_state_dict(checkpoint["optimizer"])
+
+     # If we don't do this then it will just have the learning rate of the old checkpoint
+     # and it will lead to many hours of debugging \:
+     for param_group in optimizer.param_groups:
+         param_group["lr"] = lr
+
+
+ def get_loaders(train_csv_path, test_csv_path):
+     from dataset import YOLOTrainDataset, YOLOTestDataset
+
+     IMAGE_SIZE = config.IMAGE_SIZE
+     train_dataset = YOLOTrainDataset(
+         train_csv_path,
+         transform=config.train_transforms,
+         S=[IMAGE_SIZE // 32, IMAGE_SIZE // 16, IMAGE_SIZE // 8],
+         img_dir=config.IMG_DIR,
+         label_dir=config.LABEL_DIR,
+         anchors=config.ANCHORS,
+     )
+     test_dataset = YOLOTestDataset(
+         test_csv_path,
+         transform=config.test_transforms,
+         S=[IMAGE_SIZE // 32, IMAGE_SIZE // 16, IMAGE_SIZE // 8],
+         img_dir=config.IMG_DIR,
+         label_dir=config.LABEL_DIR,
+         anchors=config.ANCHORS,
+     )
+     train_loader = DataLoader(
+         dataset=train_dataset,
+         batch_size=config.BATCH_SIZE,
+         num_workers=config.NUM_WORKERS,
+         pin_memory=config.PIN_MEMORY,
+         shuffle=True,
+         drop_last=False,
+     )
+     test_loader = DataLoader(
+         dataset=test_dataset,
+         batch_size=config.BATCH_SIZE,
+         num_workers=config.NUM_WORKERS,
+         pin_memory=config.PIN_MEMORY,
+         shuffle=False,
+         drop_last=False,
+     )
+
+     train_eval_dataset = YOLOTestDataset(
+         train_csv_path,
+         transform=config.test_transforms,
+         S=[IMAGE_SIZE // 32, IMAGE_SIZE // 16, IMAGE_SIZE // 8],
+         img_dir=config.IMG_DIR,
+         label_dir=config.LABEL_DIR,
+         anchors=config.ANCHORS,
+     )
+     train_eval_loader = DataLoader(
+         dataset=train_eval_dataset,
+         batch_size=config.BATCH_SIZE,
+         num_workers=config.NUM_WORKERS,
+         pin_memory=config.PIN_MEMORY,
+         shuffle=False,
+         drop_last=False,
+     )
+
+     return train_loader, test_loader, train_eval_loader
+
+
+ def plot_couple_examples(model, loader, thresh, iou_thresh, anchors):
+     model.eval()
+     x, y = next(iter(loader))
+     x = x.to("cuda")
+     with torch.no_grad():
+         out = model(x)
+         bboxes = [[] for _ in range(x.shape[0])]
+         for i in range(3):
+             batch_size, A, S, _, _ = out[i].shape
+             anchor = anchors[i]
+             boxes_scale_i = cells_to_bboxes(
+                 out[i], anchor, S=S, is_preds=True
+             )
+             for idx, box in enumerate(boxes_scale_i):
+                 bboxes[idx] += box
+
+         model.train()
+
+     for i in range(batch_size // 8):
+         nms_boxes = non_max_suppression(
+             bboxes[i], iou_threshold=iou_thresh, threshold=thresh, box_format="midpoint",
+         )
+         plot_image(x[i].permute(1, 2, 0).detach().cpu(), nms_boxes)
+
+
+ def seed_everything(seed=42):
+     os.environ['PYTHONHASHSEED'] = str(seed)
+     random.seed(seed)
+     np.random.seed(seed)
+     torch.manual_seed(seed)
+     torch.cuda.manual_seed(seed)
+     torch.cuda.manual_seed_all(seed)
+     torch.backends.cudnn.deterministic = True
+     torch.backends.cudnn.benchmark = False
+
+
+ def clip_coords(boxes, img_shape):
+     # Clip xyxy bounding boxes to image shape (height, width)
+     boxes[:, 0].clamp_(0, img_shape[1])  # x1
+     boxes[:, 1].clamp_(0, img_shape[0])  # y1
+     boxes[:, 2].clamp_(0, img_shape[1])  # x2
+     boxes[:, 3].clamp_(0, img_shape[0])  # y2
+
+
+ def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
+     # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
+     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+     y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw  # top left x
+     y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh  # top left y
+     y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw  # bottom right x
+     y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh  # bottom right y
+     return y
+
+
+ def xyn2xy(x, w=640, h=640, padw=0, padh=0):
+     # Convert normalized segments into pixel segments, shape (n,2)
+     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+     y[..., 0] = w * x[..., 0] + padw  # top left x
+     y[..., 1] = h * x[..., 1] + padh  # top left y
+     return y
+
+
+ def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
+     # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right
+     if clip:
+         clip_boxes(x, (h - eps, w - eps))  # warning: inplace clip
+     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+     y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w  # x center
+     y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h  # y center
+     y[..., 2] = (x[..., 2] - x[..., 0]) / w  # width
+     y[..., 3] = (x[..., 3] - x[..., 1]) / h  # height
+     return y
+
+
+ def clip_boxes(boxes, shape):
+     # Clip boxes (xyxy) to image shape (height, width)
+     if isinstance(boxes, torch.Tensor):  # faster individually
+         boxes[..., 0].clamp_(0, shape[1])  # x1
+         boxes[..., 1].clamp_(0, shape[0])  # y1
+         boxes[..., 2].clamp_(0, shape[1])  # x2
+         boxes[..., 3].clamp_(0, shape[0])  # y2
+     else:  # np.array (faster grouped)
+         boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1])  # x1, x2
+         boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0])  # y1, y2
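A minimal worked example (illustration only) of the two helpers the app relies on, using two overlapping midpoint-format boxes of the same class; the functions are imported from this module.

import torch
from utils import intersection_over_union, non_max_suppression

box_a = [0, 0.9, 0.5, 0.5, 0.4, 0.4]   # [class, score, x, y, w, h]
box_b = [0, 0.6, 0.5, 0.5, 0.3, 0.3]
iou = intersection_over_union(torch.tensor(box_a[2:]), torch.tensor(box_b[2:]),
                              box_format="midpoint")
print(float(iou))                       # 0.09 / 0.16 ~= 0.5625
kept = non_max_suppression([box_a, box_b], iou_threshold=0.5, threshold=0.4,
                           box_format="midpoint")
print(kept)                             # only box_a survives, box_b is suppressed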