"""
prepare_dataset.py (adversarial_pcam)

Author: Venkata Pydipalli

Restructures the adversarial PCam dataset to match the Hugging Face
layout: train/<label> image folders plus a dataset.json manifest.
"""
import json
import os
import re
import shutil
from pathlib import Path
def extract_info_from_filename(filename):
    """Extract label, prediction, image type, and base index from a filename."""
# Extract label information (if exists)
label = 'True' if 'labelTrue' in filename else 'False' if 'labelFalse' in filename else None
    # Extract the predicted class if it is encoded in the filename.
    # Use None (not NaN) when absent, so the `is not None` fallback used
    # when building dataset.json rows actually fires.
    pred_match = re.search(r'pred(\d+)', filename)
    prediction = int(pred_match.group(1)) if pred_match else None
    # Extract the shared image index: digits delimited by '_' or ending at '.'
base_num_match = re.search(r'[_](\d+)(?:_|\.)', filename)
base_num = base_num_match.group(1) if base_num_match else None
    # Determine the image type from the filename prefix
if filename.startswith('adv_'):
img_type = 'adversarial'
elif filename.startswith('orig_'):
img_type = 'original'
elif filename.startswith(('perturbation_', 'transformation_')):
img_type = 'perturbation'
else:
img_type = None
return label, prediction, img_type, base_num
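
# For illustration, with a hypothetical filename (not taken from the dataset):
#   extract_info_from_filename('adv_42_labelTrue_pred1.png')
#   -> ('True', 1, 'adversarial', '42')
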
def create_new_filename(filename, attack_name, base_num):
"""Create new filename with attack name and PCam-style numbering."""
# Split filename into parts
name_parts = filename.rsplit('.', 1)
extension = name_parts[1] if len(name_parts) > 1 else 'png'
if filename.startswith(('perturbation_', 'transformation_')):
prefix = 'perturbation_' if filename.startswith('perturbation_') else 'transformation_'
return f"{prefix}{base_num}_{attack_name}.{extension}"
elif filename.startswith('adv_'):
return f"adv_{base_num}_{attack_name}.{extension}"
elif filename.startswith('orig_'):
return f"orig_{base_num}_{attack_name}.{extension}"
return filename
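
# For illustration, assuming a hypothetical attack name 'fgsm':
#   create_new_filename('adv_42_labelTrue_pred1.png', 'fgsm', '42')
#   -> 'adv_42_fgsm.png'
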
def determine_attack_category(path):
"""Determine if the attack is black box or non-black box based on path."""
path_str = str(path).lower()
if "black_box_attacks" in path_str:
return "black_box_attacks"
elif "non_black_box_attacks" in path_str:
return "non_black_box_attacks"
return None
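
# For illustration (hypothetical paths):
#   determine_attack_category(Path('non_black_box_attacks/fgsm/adv_42.png'))
#   -> 'non_black_box_attacks'
#   determine_attack_category(Path('black_box_attacks/square/adv_7.png'))
#   -> 'black_box_attacks'
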
def organize_dataset(base_path, cleanup_original=False):
"""
    Organize the dataset into a PCam-style structure with only a train split.
"""
base_path = Path(base_path)
# Create output directories in PCam style
output_base = base_path / "organized_dataset"
labels = ['0', '1'] # PCam uses 0/1 instead of False/True
# Create directory structure
for label in labels:
(output_base / 'train' / label).mkdir(parents=True, exist_ok=True)
# Create perturbations and originals directories
(output_base / 'perturbations').mkdir(parents=True, exist_ok=True)
(output_base / 'originals').mkdir(parents=True, exist_ok=True)
# Dictionary to store dataset information
dataset_entries = []
file_groups = {}
# Walk through the directory
for root, _, files in os.walk(base_path):
for file in files:
            if file.endswith(('.png', '.jpg', '.jpeg')):
full_path = Path(root) / file
# Determine attack category
attack_category = determine_attack_category(full_path)
if not attack_category:
continue
# Extract attack type from path
attack_type = full_path.parent.name
                if attack_type in ('black_box_attacks', 'non_black_box_attacks'):
continue
# Extract file information
                label, prediction, img_type, base_num = extract_info_from_filename(file)
                if base_num:
                    key = (base_num, attack_type, attack_category)
                    file_groups.setdefault(key, []).append(
                        (full_path, label, prediction, img_type)
                    )
# Process each group of files
for key, files in file_groups.items():
base_num, attack_type, attack_category = key
entry = {
"attack": attack_type,
"type": attack_category,
"perturbation": None,
"adversarial": None,
"original": [],
"label": None,
"prediction": None
}
        # First pass: take the label and prediction from the adversarial example
for file_path, label, prediction, img_type in files:
if img_type == 'adversarial' and label:
entry["label"] = 1 if label == "True" else 0
entry["prediction"] = prediction
break
if entry["label"] is None:
continue
# Second pass to organize files
label_str = str(entry["label"])
dest_folder = output_base / 'train' / label_str
for file_path, _, _, img_type in files:
old_filename = file_path.name
new_filename = create_new_filename(old_filename, attack_type, base_num)
# Determine destination folder and path based on image type
if img_type == 'perturbation':
dest = output_base / 'perturbations'
rel_path = f"perturbations/{new_filename}"
elif img_type == 'original':
dest = output_base / 'originals'
rel_path = f"originals/{new_filename}"
else: # adversarial images go to train folders
dest = dest_folder
rel_path = f"train/{label_str}/{new_filename}"
# Copy file to the appropriate folder
shutil.copy2(file_path, dest / new_filename)
if img_type == 'perturbation':
entry["perturbation"] = rel_path
elif img_type == 'adversarial':
entry["adversarial"] = rel_path
elif img_type == 'original':
entry["original"].append(rel_path)
# Only add entries that have at least one image path
if entry["perturbation"] or entry["adversarial"] or entry["original"]:
dataset_entries.append(entry)
# Create Hugging Face compatible dataset.json
hf_dataset = {
"train": {
"features": {
"image_path": {"dtype": "string", "_type": "Value"},
"label": {"dtype": "int64", "_type": "Value"},
"prediction": {"dtype": "int64", "_type": "Value"},
"attack": {"dtype": "string", "_type": "Value"},
"attack_type": {"dtype": "string", "_type": "Value"},
"perturbation_path": {"dtype": "string", "_type": "Value"},
"original_path": {"dtype": "string", "_type": "Value"}
},
"rows": []
}
}
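    # Each row appended below looks like this (values illustrative; 'fgsm'
    # is a hypothetical attack name, not necessarily one present on disk):
    # {"image_path": "train/1/adv_42_fgsm.png", "label": 1, "prediction": 1,
    #  "attack": "fgsm", "attack_type": "non_black_box_attacks",
    #  "perturbation_path": "perturbations/perturbation_42_fgsm.png",
    #  "original_path": "originals/orig_42_fgsm.png"}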
# Convert entries to Hugging Face format
for entry in dataset_entries:
if entry["adversarial"]: # Only include entries that have adversarial images
hf_entry = {
"image_path": entry["adversarial"],
"label": entry["label"],
"prediction": entry["prediction"] if entry["prediction"] is not None else -1,
"attack": entry["attack"],
"attack_type": entry["type"],
"perturbation_path": entry["perturbation"] if entry["perturbation"] else "",
"original_path": entry["original"][0] if entry["original"] else ""
}
hf_dataset["train"]["rows"].append(hf_entry)
# Save Hugging Face compatible dataset.json
with open(output_base / "dataset.json", 'w') as f:
json.dump(hf_dataset, f, indent=4)
    # If cleanup was requested, delete the source attack folders
if cleanup_original:
print("Cleaning up original files...")
for folder in ['black_box_attacks', 'non_black_box_attacks']:
folder_path = base_path / folder
if folder_path.exists():
shutil.rmtree(folder_path)
print(f"Deleted {folder}")
return output_base
if __name__ == "__main__":
# Ask user about cleanup
    cleanup = input("Do you want to delete original files after organization? (yes/no): ").strip().lower() == 'yes'
# Script will work relative to its location
script_dir = Path(__file__).parent
output_path = organize_dataset(script_dir, cleanup)
print(f"Dataset organized and saved to: {output_path}")