import os
import json
import shutil
import re
from pathlib import Path
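
# Input filename conventions, inferred from the parsing logic below. These are
# illustrative patterns, not a guaranteed spec; adjust them if your attack
# outputs are named differently:
#   adv_<num>_..._labelTrue|labelFalse_..._pred<k>.<ext>   -> adversarial image
#   orig_<num>_... .<ext>                                  -> original image
#   perturbation_<num>_... / transformation_<num>_...      -> perturbation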


def extract_info_from_filename(filename):
    """Extract label, prediction, image type, and base number from a filename."""
    # Ground-truth label is encoded as 'labelTrue' / 'labelFalse' in the name.
    label = 'True' if 'labelTrue' in filename else 'False' if 'labelFalse' in filename else None

    # Model prediction is encoded as 'pred<digits>'. Fall back to None rather
    # than NaN so callers can test `is not None` and substitute a sentinel.
    pred_match = re.search(r'pred(\d+)', filename)
    prediction = int(pred_match.group(1)) if pred_match else None

    # Base image number: the first underscore-delimited run of digits.
    base_num_match = re.search(r'_(\d+)(?:_|\.)', filename)
    base_num = base_num_match.group(1) if base_num_match else None

    # Image type is determined by the filename prefix.
    if filename.startswith('adv_'):
        img_type = 'adversarial'
    elif filename.startswith('orig_'):
        img_type = 'original'
    elif filename.startswith(('perturbation_', 'transformation_')):
        img_type = 'perturbation'
    else:
        img_type = None

    return label, prediction, img_type, base_num
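
# Illustrative only, with a hypothetical filename that follows the
# conventions above:
#   extract_info_from_filename('adv_42_fgsm_labelTrue_pred1.png')
#   -> ('True', 1, 'adversarial', '42')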


def create_new_filename(filename, attack_name, base_num):
    """Create a new filename with the attack name and PCam-style numbering."""
    name_parts = filename.rsplit('.', 1)
    extension = name_parts[1] if len(name_parts) > 1 else 'png'

    if filename.startswith(('perturbation_', 'transformation_')):
        prefix = 'perturbation_' if filename.startswith('perturbation_') else 'transformation_'
        return f"{prefix}{base_num}_{attack_name}.{extension}"
    elif filename.startswith('adv_'):
        return f"adv_{base_num}_{attack_name}.{extension}"
    elif filename.startswith('orig_'):
        return f"orig_{base_num}_{attack_name}.{extension}"

    # Files with an unrecognized prefix keep their original name.
    return filename
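
# Illustrative only (hypothetical input filename):
#   create_new_filename('adv_42_labelTrue_pred1.png', 'fgsm', '42')
#   -> 'adv_42_fgsm.png'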


def determine_attack_category(path):
    """Determine whether the attack is black box or non-black box from its path."""
    path_str = str(path).lower()
    # Check the longer name first: "black_box_attacks" is a substring of
    # "non_black_box_attacks", so testing it first would misclassify every
    # non-black-box path.
    if "non_black_box_attacks" in path_str:
        return "non_black_box_attacks"
    elif "black_box_attacks" in path_str:
        return "black_box_attacks"
    return None


def organize_dataset(base_path, cleanup_original=False):
    """Organize the dataset into a PCam-style structure with only a train split."""
    base_path = Path(base_path)

    output_base = base_path / "organized_dataset"
    labels = ['0', '1']

    # Build the output directory tree up front.
    for label in labels:
        (output_base / 'train' / label).mkdir(parents=True, exist_ok=True)
    (output_base / 'perturbations').mkdir(parents=True, exist_ok=True)
    (output_base / 'originals').mkdir(parents=True, exist_ok=True)

    dataset_entries = []
    file_groups = {}

    # Group files by (base number, attack, category) so that the adversarial
    # image, its perturbation, and the original(s) stay together.
    for root, _, files in os.walk(base_path):
        for file in files:
            if file.endswith(('.png', '.jpg', '.jpeg')):
                full_path = Path(root) / file

                attack_category = determine_attack_category(full_path)
                if not attack_category:
                    continue

                # The immediate parent directory names the attack; skip the
                # category folders themselves.
                attack_type = full_path.parent.name
                if attack_type in ('black_box_attacks', 'non_black_box_attacks'):
                    continue

                label, prediction, img_type, base_num = extract_info_from_filename(file)
                if base_num:
                    key = (base_num, attack_type, attack_category)
                    file_groups.setdefault(key, []).append((full_path, label, prediction, img_type))

    for key, group in file_groups.items():
        base_num, attack_type, attack_category = key

        entry = {
            "attack": attack_type,
            "type": attack_category,
            "perturbation": None,
            "adversarial": None,
            "original": [],
            "label": None,
            "prediction": None
        }

        # The ground-truth label and model prediction are read from the
        # adversarial image's filename.
        for file_path, label, prediction, img_type in group:
            if img_type == 'adversarial' and label:
                entry["label"] = 1 if label == "True" else 0
                entry["prediction"] = prediction
                break

        # Skip groups that have no labeled adversarial image.
        if entry["label"] is None:
            continue

        label_str = str(entry["label"])
        dest_folder = output_base / 'train' / label_str

        for file_path, _, _, img_type in group:
            old_filename = file_path.name
            new_filename = create_new_filename(old_filename, attack_type, base_num)

            # Route each file to its destination folder and record the
            # dataset-relative path.
            if img_type == 'perturbation':
                dest = output_base / 'perturbations'
                rel_path = f"perturbations/{new_filename}"
            elif img_type == 'original':
                dest = output_base / 'originals'
                rel_path = f"originals/{new_filename}"
            else:
                dest = dest_folder
                rel_path = f"train/{label_str}/{new_filename}"

            shutil.copy2(file_path, dest / new_filename)
            if img_type == 'perturbation':
                entry["perturbation"] = rel_path
            elif img_type == 'adversarial':
                entry["adversarial"] = rel_path
            elif img_type == 'original':
                entry["original"].append(rel_path)

        if entry["perturbation"] or entry["adversarial"] or entry["original"]:
            dataset_entries.append(entry)

    hf_dataset = {
        "train": {
            "features": {
                "image_path": {"dtype": "string", "_type": "Value"},
                "label": {"dtype": "int64", "_type": "Value"},
                "prediction": {"dtype": "int64", "_type": "Value"},
                "attack": {"dtype": "string", "_type": "Value"},
                "attack_type": {"dtype": "string", "_type": "Value"},
                "perturbation_path": {"dtype": "string", "_type": "Value"},
                "original_path": {"dtype": "string", "_type": "Value"}
            },
            "rows": []
        }
    }
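
    # Illustrative only: a populated row might look like this (the attack
    # name 'fgsm' and the numbering are hypothetical):
    #   {"image_path": "train/1/adv_42_fgsm.png", "label": 1, "prediction": 0,
    #    "attack": "fgsm", "attack_type": "black_box_attacks",
    #    "perturbation_path": "perturbations/perturbation_42_fgsm.png",
    #    "original_path": "originals/orig_42_fgsm.png"}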

    # Flatten entries into manifest rows, keyed off the adversarial image;
    # -1 marks a missing prediction so the column stays int64.
    for entry in dataset_entries:
        if entry["adversarial"]:
            hf_entry = {
                "image_path": entry["adversarial"],
                "label": entry["label"],
                "prediction": entry["prediction"] if entry["prediction"] is not None else -1,
                "attack": entry["attack"],
                "attack_type": entry["type"],
                "perturbation_path": entry["perturbation"] if entry["perturbation"] else "",
                "original_path": entry["original"][0] if entry["original"] else ""
            }
            hf_dataset["train"]["rows"].append(hf_entry)

    with open(output_base / "dataset.json", 'w') as f:
        json.dump(hf_dataset, f, indent=4)

    # Optionally delete the source attack folders once everything is copied.
    if cleanup_original:
        print("Cleaning up original files...")
        for folder in ['black_box_attacks', 'non_black_box_attacks']:
            folder_path = base_path / folder
            if folder_path.exists():
                shutil.rmtree(folder_path)
                print(f"Deleted {folder}")

    return output_base
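
# Resulting layout (illustrative; <num> and <attack> come from the inputs):
#   organized_dataset/
#     train/0/adv_<num>_<attack>.png
#     train/1/adv_<num>_<attack>.png
#     perturbations/perturbation_<num>_<attack>.png
#     originals/orig_<num>_<attack>.png
#     dataset.json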


if __name__ == "__main__":
    cleanup = input("Do you want to delete original files after organization? (yes/no): ").strip().lower() == 'yes'

    script_dir = Path(__file__).parent
    output_path = organize_dataset(script_dir, cleanup)
    print(f"Dataset organized and saved to: {output_path}")