Push updates from pre-prod
- app.py +404 -24
- requirements.txt +0 -2

app.py
CHANGED
@@ -26,6 +26,85 @@ css = """
 }
 """
 
+def extract_predictions(predictions_, conf_thresh):
+    coco_labels = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
+                   'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+                   'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+                   'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+                   'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
+                   'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+                   'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard',
+                   'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
+                   'teddy bear', 'hair drier', 'toothbrush']
+    # Get the predicted class
+    predictions_class = [coco_labels[i] for i in list(predictions_["labels"])]
+    # print("\npredicted classes:", predictions_class)
+    if len(predictions_class) < 1:
+        return [], [], []
+    # Get the predicted bounding boxes
+    predictions_boxes = [[(i[0], i[1]), (i[2], i[3])] for i in list(predictions_["boxes"])]
+
+    # Get the predicted prediction score
+    predictions_score = list(predictions_["scores"])
+    # print("predicted score:", predictions_score)
+
+    # Get a list of indices with score greater than threshold
+    threshold = conf_thresh
+    predictions_t = [predictions_score.index(x) for x in predictions_score if x > threshold]
+    if len(predictions_t) > 0:
+        predictions_t = predictions_t  # [-1] # indices where score over threshold
+    else:
+        # no predictions exceeding threshold
+        return [], [], []
+    # predictions in score order
+    predictions_boxes = [predictions_boxes[i] for i in predictions_t]
+    predictions_class = [predictions_class[i] for i in predictions_t]
+    predictions_scores = [predictions_score[i] for i in predictions_t]
+    return predictions_class, predictions_boxes, predictions_scores
+
+def plot_image_with_boxes(img, boxes, pred_cls, title):
+    import cv2
+    text_size = 1
+    text_th = 2
+    rect_th = 1
+
+    sections = []
+    for i in range(len(boxes)):
+        cv2.rectangle(img, (int(boxes[i][0][0]), int(boxes[i][0][1])), (int(boxes[i][1][0]), int(boxes[i][1][1])),
+                      color=(0, 255, 0), thickness=rect_th)
+        # Write the prediction class
+        cv2.putText(img, pred_cls[i], (int(boxes[i][0][0]), int(boxes[i][0][1])), cv2.FONT_HERSHEY_SIMPLEX, text_size,
+                    (0, 255, 0), thickness=text_th)
+        sections.append(((int(boxes[i][0][0]),
+                          int(boxes[i][0][1]),
+                          int(boxes[i][1][0]),
+                          int(boxes[i][1][1])), (pred_cls[i])))
+
+
+    return img.astype(np.uint8)
+
+def filter_boxes(predictions, conf_thresh):
+    dictionary = {}
+
+    boxes_list = []
+    scores_list = []
+    labels_list = []
+
+    for i in range(len(predictions[0]["boxes"])):
+        score = predictions[0]["scores"][i]
+        if score >= conf_thresh:
+            boxes_list.append(predictions[0]["boxes"][i])
+            scores_list.append(predictions[0]["scores"][[i]])
+            labels_list.append(predictions[0]["labels"][[i]])
+
+    dictionary["boxes"] = np.vstack(boxes_list)
+    dictionary["scores"] = np.hstack(scores_list)
+    dictionary["labels"] = np.hstack(labels_list)
+
+    y = [dictionary]
+
+    return y
+
 def basic_cifar10_model():
     '''
     Load an example CIFAR10 model
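Note: as written, filter_boxes assumes at least one score clears conf_thresh; np.vstack/np.hstack raise ValueError on empty lists. A minimal guard, as a sketch only (filter_boxes_safe is a hypothetical variant, not part of this commit):

import numpy as np

def filter_boxes_safe(predictions, conf_thresh):
    # Same filtering as filter_boxes above, but returns an empty,
    # well-formed prediction dict instead of raising when nothing passes.
    keep = [i for i, s in enumerate(predictions[0]["scores"]) if s >= conf_thresh]
    if not keep:
        return [{"boxes": np.zeros((0, 4)), "scores": np.zeros(0),
                 "labels": np.zeros(0, dtype=int)}]
    return [{"boxes": np.vstack([predictions[0]["boxes"][i] for i in keep]),
             "scores": np.hstack([predictions[0]["scores"][[i]] for i in keep]),
             "labels": np.hstack([predictions[0]["labels"][[i]] for i in keep])}]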
@@ -112,6 +191,186 @@ def basic_cifar10_model():
     )
     return jptc
 
+def det_evasion_evaluate(*args):
+    '''
+    Run a detection task evaluation
+    '''
+
+    attack = args[0]
+    model_type = args[1]
+
+    box_thresh = args[-3]
+    dataset_type = args[-2]
+    image = args[-1]
+
+    if dataset_type == "COCO":
+        from torchvision.transforms import transforms
+        import requests
+        from PIL import Image
+        NUMBER_CHANNELS = 3
+        INPUT_SHAPE = (NUMBER_CHANNELS, 640, 640)
+
+        transform = transforms.Compose([
+            transforms.Resize(INPUT_SHAPE[1], interpolation=transforms.InterpolationMode.BICUBIC),
+            transforms.CenterCrop(INPUT_SHAPE[1]),
+            transforms.ToTensor()
+        ])
+
+        urls = ['http://images.cocodataset.org/val2017/000000039769.jpg',
+                'http://images.cocodataset.org/val2017/000000397133.jpg',
+                'http://images.cocodataset.org/val2017/000000037777.jpg',
+                'http://images.cocodataset.org/val2017/000000454661.jpg',
+                'http://images.cocodataset.org/val2017/000000094852.jpg']
+
+        coco_images = []
+        for url in urls:
+            im = Image.open(requests.get(url, stream=True).raw)
+            im = transform(im).numpy()
+            coco_images.append(im)
+        image = np.array(coco_images)*255
+
+    if model_type == "YOLOv5":
+        from heart.estimators.object_detection.pytorch_yolo import JaticPyTorchYolo
+        coco_labels = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
+                       'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+                       'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+                       'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+                       'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
+                       'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+                       'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard',
+                       'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
+                       'teddy bear', 'hair drier', 'toothbrush']
+        detector = JaticPyTorchYolo(device_type='cpu',
+                                    input_shape=(3, 640, 640),
+                                    clip_values=(0, 255),
+                                    attack_losses=("loss_total", "loss_cls",
+                                                   "loss_box",
+                                                   "loss_obj"),
+                                    labels=coco_labels)
+
+    if attack=="PGD":
+
+        from art.attacks.evasion import ProjectedGradientDescent
+        from heart.attacks.attack import JaticAttack
+        from heart.metrics import AccuracyPerturbationMetric
+        from torch.nn.functional import softmax
+        from maite.protocols import HasDataImage, is_typed_dict
+
+        pgd_attack = ProjectedGradientDescent(estimator=detector, max_iter=args[7], eps=args[8],
+                                              eps_step=args[9], targeted=args[10]!="")
+        attack = JaticAttack(pgd_attack)
+
+        benign_output = detector(image)
+
+        dets = [{'boxes': benign_output.boxes[i],
+                 'scores': benign_output.scores[i],
+                 'labels': benign_output.labels[i]} for i in range(len(image))]
+
+        y = [filter_boxes([t], 0.8)[0] for t in dets]
+        if args[10]!="":
+            data = {'image': image[[0]], 'label': y[-1:]}
+        else:
+            data = image
+
+
+        output = attack.run_attack(data=data)
+        adv_output = detector(output.adversarial_examples)
+        out_imgs = []
+        for i in range(len(output.adversarial_examples)):
+            pred = {'boxes': adv_output.boxes[i],
+                    'scores': adv_output.scores[i],
+                    'labels': adv_output.labels[i]}
+            preds_orig = extract_predictions(pred, box_thresh)
+            out_img = plot_image_with_boxes(img=output.adversarial_examples[i].transpose(1,2,0).copy(),
+                                            boxes=preds_orig[1], pred_cls=preds_orig[0], title="Detections")
+            out_imgs.append(out_img)
+
+        out_imgs_benign = []
+        for i in range(len(image)):
+            pred = {'boxes': benign_output.boxes[i],
+                    'scores': benign_output.scores[i],
+                    'labels': benign_output.labels[i]}
+            preds_benign = extract_predictions(pred, box_thresh)
+            out_img = plot_image_with_boxes(img=image[i].transpose(1,2,0).copy(),
+                                            boxes=preds_benign[1], pred_cls=preds_benign[0], title="Detections")
+            out_imgs_benign.append(out_img)
+
+
+        image = []
+        for i, img in enumerate(out_imgs_benign):
+            image.append(img.astype(np.uint8))
+
+        adv_imgs = []
+        for i, img in enumerate(out_imgs):
+            adv_imgs.append(img.astype(np.uint8))
+
+        return [image, adv_imgs]
+
+    elif attack=="Adversarial Patch":
+        from art.attacks.evasion.adversarial_patch.adversarial_patch_pytorch import AdversarialPatchPyTorch
+        from heart.attacks.attack import JaticAttack
+        from heart.metrics import AccuracyPerturbationMetric
+        from torch.nn.functional import softmax
+        from maite.protocols import HasDataImage, is_typed_dict
+
+
+        batch_size = 16
+        scale_min = 0.3
+        scale_max = 1.0
+        rotation_max = 0
+        learning_rate = 5000.
+
+        patch_attack = AdversarialPatchPyTorch(estimator=detector, rotation_max=rotation_max, patch_location=(args[8], args[9]),
+                                               scale_min=scale_min, scale_max=scale_max, patch_type='circle',
+                                               learning_rate=learning_rate, max_iter=args[7], batch_size=batch_size,
+                                               patch_shape=(3, args[10], args[11]), verbose=False, targeted=args[-4]=="Yes")
+
+        attack = JaticAttack(patch_attack)
+
+        benign_output = detector(image)
+
+        dets = [{'boxes': benign_output.boxes[i],
+                 'scores': benign_output.scores[i],
+                 'labels': benign_output.labels[i]} for i in range(len(image))]
+
+        if args[-4]=="Yes":
+            data = {'image': image, 'label': [dets[-1] for i in image]}
+        else:
+            data = {'image': image, 'label': dets}
+
+        output = attack.run_attack(data=data)
+        adv_output = detector(output.adversarial_examples)
+        out_imgs = []
+        for i in range(len(output.adversarial_examples)):
+            pred = {'boxes': adv_output.boxes[i],
+                    'scores': adv_output.scores[i],
+                    'labels': adv_output.labels[i]}
+            preds_orig = extract_predictions(pred, box_thresh)
+            out_img = plot_image_with_boxes(img=output.adversarial_examples[i].transpose(1,2,0).copy(),
+                                            boxes=preds_orig[1], pred_cls=preds_orig[0], title="Detections")
+            out_imgs.append(out_img)
+
+        out_imgs_benign = []
+        for i in range(len(image)):
+            pred = {'boxes': benign_output.boxes[i],
+                    'scores': benign_output.scores[i],
+                    'labels': benign_output.labels[i]}
+            preds_benign = extract_predictions(pred, box_thresh)
+            out_img = plot_image_with_boxes(img=image[i].transpose(1,2,0).copy(),
+                                            boxes=preds_benign[1], pred_cls=preds_benign[0], title="Detections")
+            out_imgs_benign.append(out_img)
+
+
+        image = []
+        for i, img in enumerate(out_imgs_benign):
+            image.append(img.astype(np.uint8))
+
+        adv_imgs = []
+        for i, img in enumerate(out_imgs):
+            adv_imgs.append(img.astype(np.uint8))
+
+        return [image, adv_imgs]
+
 def clf_evasion_evaluate(*args):
     '''
     Run a classification task evaluation
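Note: for reference, the helper pipeline this hunk builds on can be exercised standalone. A minimal sketch with a dummy detection (assumes the app's extract_predictions and plot_image_with_boxes are in scope, and numpy plus opencv-python are installed):

import numpy as np

pred = {"boxes": np.array([[10., 20., 110., 140.]]),  # (x1, y1, x2, y2)
        "scores": np.array([0.9]),
        "labels": np.array([0])}                      # index 0 -> 'person'

classes, boxes, scores = extract_predictions(pred, conf_thresh=0.5)
annotated = plot_image_with_boxes(img=np.zeros((480, 640, 3), dtype=np.float32),
                                  boxes=boxes, pred_cls=classes, title="Detections")
# annotated is a uint8 HxWx3 image with one labelled green box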
@@ -421,9 +680,6 @@ def show_target_label_dataframe(dataset_type):
 with gr.Blocks(css=css, theme='xiaobaiyuan/theme_brief') as demo:
     gr.Markdown("<h1>HEART Adversarial Robustness Gradio Example</h1>")
 
-    with gr.Tab("Info"):
-        gr.Markdown('This is step 1. Using the tabs, select a task for evaluation.')
-
     with gr.Tab("Classification", elem_classes="task-tab"):
         gr.Markdown("Classifying images with a set of categories.")
 
@@ -463,19 +719,19 @@ with gr.Blocks(css=css, theme='xiaobaiyuan/theme_brief') as demo:
         with gr.Row():
 
             with gr.Tab("Info"):
-                gr.Markdown("This is step
+                gr.Markdown("This is step 1. Select the type of attack for evaluation.")
 
             with gr.Tab("White Box"):
                 gr.Markdown("White box attacks assume the attacker has __full access__ to the model.")
 
                 with gr.Tab("Info"):
-                    gr.Markdown("This is step
+                    gr.Markdown("This is step 2. Select the type of white-box attack to evaluate.")
 
                 with gr.Tab("Evasion"):
                     gr.Markdown("Evasion attacks are deployed to cause a model to incorrectly classify or detect items/objects in an image.")
 
                     with gr.Tab("Info"):
-                        gr.Markdown("This is step
+                        gr.Markdown("This is step 3. Select the type of Evasion attack to evaluate.")
 
                     with gr.Tab("Projected Gradient Descent"):
                         gr.Markdown("This attack uses PGD to identify adversarial examples.")
@@ -483,11 +739,11 @@ with gr.Blocks(css=css, theme='xiaobaiyuan/theme_brief') as demo:
 
                         with gr.Row():
 
-                            with gr.Column():
+                            with gr.Column(scale=1):
                                 attack = gr.Textbox(visible=True, value="PGD", label="Attack", interactive=False)
-                                max_iter = gr.Slider(minimum=1, maximum=
-                                eps = gr.Slider(minimum=0.
-                                eps_steps = gr.Slider(minimum=0.
+                                max_iter = gr.Slider(minimum=1, maximum=20, label="Max iterations", value=10, step=1)
+                                eps = gr.Slider(minimum=0.03, maximum=1, label="Epsilon", value=0.03)
+                                eps_steps = gr.Slider(minimum=0.003, maximum=0.99, label="Epsilon steps", value=0.003)
                                 targeted = gr.Textbox(placeholder="Target label (integer)", label="Target")
                                 with gr.Accordion("Target mapping", open=False):
                                     cifar_labels = gr.Dataframe(pd.DataFrame(['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'],
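Note: the PGD controls here work on a 0-1 pixel scale, while the detection PGD tab added later in this diff uses 8-255 (matching the YOLOv5 estimator's clip_values=(0, 255)). The two defaults appear to be roughly equivalent perturbation budgets:

eps_detection_in_unit_scale = 8 / 255        # ~0.031, close to the 0.03 default here
eps_classification_in_255_scale = 0.03 * 255 # ~7.65, close to the detection default of 8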
@@ -504,7 +760,7 @@ with gr.Blocks(css=css, theme='xiaobaiyuan/theme_brief') as demo:
                                 dataset_type.change(show_target_label_dataframe, dataset_type, [cifar_labels, xview_labels])
 
                             # Evaluation Output. Visualisations of success/failures of running evaluation attacks.
-                            with gr.Column():
+                            with gr.Column(scale=2):
                                 with gr.Row():
                                     with gr.Column():
                                         original_gallery = gr.Gallery(label="Original", preview=True, height=600)
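Note: gr.Column(scale=...) sets the relative width of sibling columns inside a gr.Row; this change gives the results pane twice the width of the attack controls. A minimal sketch of the layout idiom:

import gradio as gr

with gr.Blocks() as sketch:
    with gr.Row():
        with gr.Column(scale=1):   # narrow controls column
            gr.Textbox(label="controls")
        with gr.Column(scale=2):   # results column, ~2x wider
            gr.Gallery(label="results")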
@@ -529,20 +785,18 @@ with gr.Blocks(css=css, theme='xiaobaiyuan/theme_brief') as demo:
                                 clear_btn = gr.ClearButton([image, targeted, original_gallery, benign_output, clean_accuracy,
                                                             adversarial_gallery, adversarial_output, robust_accuracy, perturbation_added])
 
-
-
                     with gr.Tab("Adversarial Patch"):
                         gr.Markdown("This attack crafts an adversarial patch that facilitates evasion.")
 
                         with gr.Row():
 
-                            with gr.Column():
+                            with gr.Column(scale=1):
                                 attack = gr.Textbox(visible=True, value="Adversarial Patch", label="Attack", interactive=False)
-                                max_iter = gr.Slider(minimum=1, maximum=
-                                x_location = gr.Slider(minimum=1, maximum=640, label="Location (x)", value=18)
-                                y_location = gr.Slider(minimum=1, maximum=480, label="Location (y)", value=18)
-                                patch_height = gr.Slider(minimum=1, maximum=640, label="Patch height", value=18)
-                                patch_width = gr.Slider(minimum=1, maximum=480, label="Patch width", value=18)
+                                max_iter = gr.Slider(minimum=1, maximum=20, label="Max iterations", value=2, step=1)
+                                x_location = gr.Slider(minimum=1, maximum=640, label="Location (x)", value=18, step=1)
+                                y_location = gr.Slider(minimum=1, maximum=480, label="Location (y)", value=18, step=1)
+                                patch_height = gr.Slider(minimum=1, maximum=640, label="Patch height", value=18, step=1)
+                                patch_width = gr.Slider(minimum=1, maximum=480, label="Patch width", value=18, step=1)
                                 targeted = gr.Textbox(placeholder="Target label (integer)", label="Target")
                                 with gr.Accordion("Target mapping", open=False):
                                     cifar_labels = gr.Dataframe(pd.DataFrame(['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'],
@@ -559,7 +813,7 @@ with gr.Blocks(css=css, theme='xiaobaiyuan/theme_brief') as demo:
                                 dataset_type.change(show_target_label_dataframe, dataset_type, [cifar_labels, xview_labels])
 
                             # Evaluation Output. Visualisations of success/failures of running evaluation attacks.
-                            with gr.Column():
+                            with gr.Column(scale=2):
                                 with gr.Row():
                                     with gr.Column():
                                         original_gallery = gr.Gallery(label="Original", preview=True, height=600)
@@ -591,14 +845,14 @@ with gr.Blocks(css=css, theme='xiaobaiyuan/theme_brief') as demo:
                 gr.Markdown("Black box attacks assume the attacker __does not__ have full access to the model but can query it for predictions.")
 
                 with gr.Tab("Info"):
-                    gr.Markdown("This is step
+                    gr.Markdown("This is step 2. Select the type of black-box attack to evaluate.")
 
                 with gr.Tab("Evasion"):
 
                     gr.Markdown("Evasion attacks are deployed to cause a model to incorrectly classify or detect items/objects in an image.")
 
                     with gr.Tab("Info"):
-                        gr.Markdown("This is step
+                        gr.Markdown("This is step 3. Select the type of Evasion attack to evaluate.")
 
                     with gr.Tab("HopSkipJump"):
                         gr.Markdown("Coming soon.")
@@ -612,7 +866,133 @@ with gr.Blocks(css=css, theme='xiaobaiyuan/theme_brief') as demo:
 
     with gr.Tab("Object Detection"):
         gr.Markdown("Extracting objects from images and identifying their category.")
-
+
+        # Model and Dataset Selection
+        with gr.Row():
+            # Model and Dataset type e.g. Torchvision, HuggingFace, local etc.
+            with gr.Column():
+                model_type = gr.Radio(label="Model type", choices=["YOLOv5"],
+                                      value="YOLOv5")
+                dataset_type = gr.Radio(label="Dataset", choices=["COCO",],
+                                        value="COCO")
+
+        model_type.change(show_model_params, model_type, model_params)
+        dataset_type.change(show_dataset_params, dataset_type, [dataset_params, local_image])
+
+        # Attack Selection
+        with gr.Row():
+
+            with gr.Tab("Info"):
+                gr.Markdown("This is step 1. Select the type of attack for evaluation.")
+
+            with gr.Tab("White Box"):
+                gr.Markdown("White box attacks assume the attacker has __full access__ to the model.")
+
+                with gr.Tab("Info"):
+                    gr.Markdown("This is step 2. Select the type of white-box attack to evaluate.")
+
+                with gr.Tab("Evasion"):
+                    gr.Markdown("Evasion attacks are deployed to cause a model to incorrectly classify or detect items/objects in an image.")
+
+                    with gr.Tab("Info"):
+                        gr.Markdown("This is step 3. Select the type of Evasion attack to evaluate.")
+
+                    with gr.Tab("Projected Gradient Descent"):
+                        gr.Markdown("This attack uses PGD to identify adversarial examples.")
+
+
+                        with gr.Row():
+
+                            with gr.Column(scale=1):
+                                attack = gr.Textbox(visible=True, value="PGD", label="Attack", interactive=False)
+                                max_iter = gr.Slider(minimum=1, maximum=10, label="Max iterations", value=4, step=1)
+                                eps = gr.Slider(minimum=8, maximum=255, label="Epsilon", value=8, step=1)
+                                eps_steps = gr.Slider(minimum=1, maximum=254, label="Epsilon steps", value=1, step=1)
+                                targeted = gr.Textbox(placeholder="Target label (integer)", label="Target")
+                                det_threshold = gr.Slider(minimum=0.0, maximum=100, label="Detection threshold", value=0.2)
+                                eval_btn_pgd = gr.Button("Evaluate")
+                                model_clip.change(pgd_update_epsilon, model_clip, eps)
+
+                            # Evaluation Output. Visualisations of success/failures of running evaluation attacks.
+                            with gr.Column(scale=3):
+                                with gr.Row():
+                                    with gr.Column():
+                                        original_gallery = gr.Gallery(label="Original", preview=True, show_download_button=True, height=600)
+
+                                    with gr.Column():
+                                        adversarial_gallery = gr.Gallery(label="Adversarial", preview=True, show_download_button=True, height=600)
+
+                        eval_btn_pgd.click(det_evasion_evaluate, inputs=[attack, model_type, model_path, model_channels, model_height, model_width,
+                                                                         model_clip, max_iter, eps, eps_steps, targeted,
+                                                                         det_threshold, dataset_type, image],
+                                           outputs=[original_gallery, adversarial_gallery], api_name='patch')
+
+                        with gr.Row():
+                            clear_btn = gr.ClearButton([image, original_gallery,
+                                                        adversarial_gallery])
+
+
+
+                    with gr.Tab("Adversarial Patch"):
+                        gr.Markdown("This attack crafts an adversarial patch that facilitates evasion.")
+
+                        with gr.Row():
+
+                            with gr.Column(scale=1):
+                                attack = gr.Textbox(visible=True, value="Adversarial Patch", label="Attack", interactive=False)
+                                max_iter = gr.Slider(minimum=1, maximum=100, label="Max iterations", value=1, step=1)
+                                x_location = gr.Slider(minimum=1, maximum=640, label="Location (x)", value=100, step=1)
+                                y_location = gr.Slider(minimum=1, maximum=480, label="Location (y)", value=100, step=1)
+                                patch_height = gr.Slider(minimum=1, maximum=640, label="Patch height", value=100, step=1)
+                                patch_width = gr.Slider(minimum=1, maximum=480, label="Patch width", value=100, step=1)
+                                targeted = gr.Radio(choices=['Yes', 'No'], value='No', label="Targeted")
+                                det_threshold = gr.Slider(minimum=0.0, maximum=100, label="Detection threshold", value=0.2)
+                                eval_btn_patch = gr.Button("Evaluate")
+                                model_clip.change()
+
+                            # Evaluation Output. Visualisations of success/failures of running evaluation attacks.
+                            with gr.Column(scale=3):
+                                with gr.Row():
+                                    with gr.Column():
+                                        original_gallery = gr.Gallery(label="Original", preview=True, show_download_button=True, height=600)
+
+                                    with gr.Column():
+                                        adversarial_gallery = gr.Gallery(label="Adversarial", preview=True, show_download_button=True, height=600)
+
+                        dataset_type.change(patch_show_label_output, dataset_type, [adversarial_output, ])
+                        eval_btn_patch.click(det_evasion_evaluate, inputs=[attack, model_type, model_path, model_channels, model_height, model_width,
+                                                                           model_clip, max_iter, x_location, y_location, patch_height, patch_width, targeted,
+                                                                           det_threshold, dataset_type, image],
+                                             outputs=[original_gallery, adversarial_gallery])
+
+                        with gr.Row():
+                            clear_btn = gr.ClearButton([image, targeted, original_gallery,
+                                                        adversarial_gallery])
+
+                    with gr.Tab("Poisoning"):
+                        gr.Markdown("Coming soon.")
+
+            with gr.Tab("Black Box"):
+                gr.Markdown("Black box attacks assume the attacker __does not__ have full access to the model but can query it for predictions.")
+
+                with gr.Tab("Info"):
+                    gr.Markdown("This is step 2. Select the type of black-box attack to evaluate.")
+
+                with gr.Tab("Evasion"):
+
+                    gr.Markdown("Evasion attacks are deployed to cause a model to incorrectly classify or detect items/objects in an image.")
+
+                    with gr.Tab("Info"):
+                        gr.Markdown("This is step 3. Select the type of Evasion attack to evaluate.")
+
+                    with gr.Tab("HopSkipJump"):
+                        gr.Markdown("Coming soon.")
+
+                    with gr.Tab("Square Attack"):
+                        gr.Markdown("Coming soon.")
+
+                    with gr.Tab("AutoAttack"):
+                        gr.Markdown("Coming soon.")
 
 if __name__ == "__main__":
 
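Note: det_evasion_evaluate reads its inputs positionally, so the two Evaluate buttons must keep their input lists in sync with the handler's indices (also worth flagging: the PGD button registers api_name='patch'). The contract implied by the code above, written out as a sketch:

# Offsets into *args as wired by the .click(...) calls in this hunk
PGD_INPUTS = {7: "max_iter", 8: "eps", 9: "eps_steps", 10: "targeted",
              -3: "det_threshold", -2: "dataset_type", -1: "image"}
PATCH_INPUTS = {7: "max_iter", 8: "x_location", 9: "y_location",
                10: "patch_height", 11: "patch_width", -4: "targeted",
                -3: "det_threshold", -2: "dataset_type", -1: "image"}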
@@ -629,5 +1009,5 @@ if __name__ == "__main__":
     subprocess.run([sys.executable, '-m', 'pip', 'install', HEART_INSTALL])
 
     # during development, set debug=True
-    demo.launch()
+    demo.launch(debug=True)
 
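Note: the comment says "during development, set debug=True", yet this commit hard-codes it. A sketch of an environment-gated alternative (GRADIO_DEBUG is an assumed variable name, not part of this commit):

import os

demo.launch(debug=os.getenv("GRADIO_DEBUG", "0") == "1")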
requirements.txt
CHANGED
@@ -53,5 +53,3 @@ pycodestyle==2.8.0
 black==22.3.0
 isort==5.12.0
 
-
-