diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..a6344aac8c09253b3b630fb776ae94478aa0275b --- /dev/null +++ b/.gitattributes @@ -0,0 +1,35 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..0d2a38f2db6bd3818f751b64ea745036e3d7218d --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,28 @@ +name: CI + +# Controls when the workflow will run +on: + # Triggers the workflow on push events but only for the "main" branch + push: + branches: [ "main" ] + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +jobs: + sync-to-hub: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Add remote + env: + HF_TOKEN: ${{ secrets.HF_TOKEN }} + run: git remote add space https://ArissBandoss:$HF_TOKEN@huggingface.co/spaces/ArissBandoss/DeepFake-Videos-Detection + + - name: Push to hub + env: + HF_TOKEN: ${{ secrets.HF_TOKEN }} + run: git push --force https://ArissBandoss:$HF_TOKEN@huggingface.co/spaces/ArissBandoss/DeepFake-Videos-Detection main diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..92b21528ea391b607f6a952e034013f7ddf6b53f --- /dev/null +++ b/.gitignore @@ -0,0 +1,12 @@ +.idea +*__pycache__* +*.vscode* +*.pyc +*.pth +*.pt +*.dat +audios-testing +temp_video_frames +.gradio +*.zip +*.npy \ No newline at end of file diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..bd80d63e33da48e3cca1d7f7577ac0f574a894ec --- /dev/null +++ b/app.py @@ -0,0 +1,132 @@ +import os +import cv2 +import torch +import numpy as np +from torchvision import transforms +from PIL import Image +from tqdm import tqdm +from training.detectors import DETECTOR +import yaml +import gradio as gr + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + 
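A note on paths: gradio_inference() in app.py below and main() in inference.py resolve configs and weights under /teamspace/studios/this_studio/DeepfakeBench/..., which will not exist on the Hugging Face Space that the workflow above force-pushes to (and the fine-tuned *.pth checkpoints are excluded by .gitignore, so they are not synced either). A minimal sketch of resolving both files relative to the repository root instead is given here; the training/weights/<model_name>_best.pth layout is inferred from the absolute paths used in this diff, and the resolve_model_paths helper itself is hypothetical, not part of the change.

import os

# Hypothetical helper: resolve detector config and weights relative to the repo root
# instead of the absolute /teamspace/studios/this_studio/... paths used further down.
REPO_ROOT = os.path.dirname(os.path.abspath(__file__))

def resolve_model_paths(model_name):
    config_path = os.path.join(REPO_ROOT, "training", "config", "detector", f"{model_name}.yaml")
    weights_path = os.path.join(REPO_ROOT, "training", "weights", f"{model_name}_best.pth")  # assumed layout
    return config_path, weights_path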
+# available models in the repository +AVAILABLE_MODELS = [ + "xception", + "ucf", +] + +# load the model +def load_model(model_name, config_path, weights_path): + with open(config_path, 'r') as f: + config = yaml.safe_load(f) + + config['model_name'] = model_name + + model_class = DETECTOR[model_name] + model = model_class(config).to(device) + + checkpoint = torch.load(weights_path, map_location=device) + model.load_state_dict(checkpoint, strict=True) + model.eval() + return model + +# preprocess a single video +def preprocess_video(video_path, output_dir, frame_num=32): + os.makedirs(output_dir, exist_ok=True) + frames_dir = os.path.join(output_dir, "frames") + os.makedirs(frames_dir, exist_ok=True) + + cap = cv2.VideoCapture(video_path) + total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + frame_indices = np.linspace(0, total_frames - 1, frame_num, dtype=int) + + # extract frames + frames = [] + for idx in frame_indices: + cap.set(cv2.CAP_PROP_POS_FRAMES, idx) + ret, frame = cap.read() + if ret: + frame_path = os.path.join(frames_dir, f"frame_{idx:04d}.png") + cv2.imwrite(frame_path, frame) + frames.append(frame_path) + + cap.release() + return frames + +# inference on a single video +def infer_video(video_path, model, device): + # Preprocess the video + output_dir = "temp_video_frames" + frames = preprocess_video(video_path, output_dir) + + transform = transforms.Compose([ + transforms.Resize((256, 256)), + transforms.ToTensor(), + transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + ]) + + probs = [] + for frame_path in frames: + frame = Image.open(frame_path).convert("RGB") + frame = transform(frame).unsqueeze(0).to(device) + + data_dict = { + "image": frame, + "label": torch.tensor([0]).to(device), # Dummy label + "label_spe": torch.tensor([0]).to(device), # Dummy specific label + } + + with torch.no_grad(): + pred_dict = model(data_dict, inference=True) + + logits = pred_dict["cls"] # Shape: [batch_size, num_classes] + prob = torch.softmax(logits, dim=1)[:, 1].item() # Probability of being "fake" + probs.append(prob) + + # aggregate predictions (e.g., average probability) + avg_prob = np.mean(probs) + prediction = "Fake" if avg_prob > 0.5 else "Real" + return prediction, avg_prob + +# gradio inference function +def gradio_inference(video, model_name): + config_path = f"/teamspace/studios/this_studio/DeepfakeBench/training/config/detector/{model_name}.yaml" + weights_path = f"/teamspace/studios/this_studio/DeepfakeBench/training/weights/{model_name}_best.pth" + + if not os.path.exists(config_path): + return f"Error: Config file for model '{model_name}' not found at {config_path}." + if not os.path.exists(weights_path): + return f"Error: Weights file for model '{model_name}' not found at {weights_path}." 
+ + model = load_model(model_name, config_path, weights_path) + + prediction, confidence = infer_video(video, model, device) + return f"Model: {model_name}\nPrediction: {prediction} (Confidence: {confidence:.4f})" + +# Gradio App +def create_gradio_app(): + with gr.Blocks() as demo: + gr.Markdown("# Deepfake Detection Demo") + gr.Markdown("Upload a video and select a model to detect if it's real or fake.") + + with gr.Row(): + video_input = gr.Video(label="Upload Video") + model_dropdown = gr.Dropdown(choices=AVAILABLE_MODELS, label="Select Model", value="xception") + + output_text = gr.Textbox(label="Prediction Result") + + submit_button = gr.Button("Run Inference") + submit_button.click( + fn=gradio_inference, + inputs=[video_input, model_dropdown], + outputs=output_text, + ) + + return demo + + +if __name__ == "__main__": + demo = create_gradio_app() + demo.launch(share=True) \ No newline at end of file diff --git a/inference.py b/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..079b1b02b592d6a66f0956efc0f8b33744b4a414 --- /dev/null +++ b/inference.py @@ -0,0 +1,117 @@ +import os +import cv2 +import torch +import numpy as np +from torchvision import transforms +from PIL import Image +from tqdm import tqdm +from training.detectors import DETECTOR +import yaml + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +# load the model +def load_model(model_name, config_path, weights_path): + with open(config_path, 'r') as f: + config = yaml.safe_load(f) + + config['model_name'] = model_name + + model_class = DETECTOR[model_name] + model = model_class(config).to(device) + + checkpoint = torch.load(weights_path, map_location=device) + model.load_state_dict(checkpoint, strict=True) + model.eval() + return model + +# preprocess a single video +def preprocess_video(video_path, output_dir, frame_num=32): + os.makedirs(output_dir, exist_ok=True) + frames_dir = os.path.join(output_dir, "frames") + os.makedirs(frames_dir, exist_ok=True) + + cap = cv2.VideoCapture(video_path) + total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + frame_indices = np.linspace(0, total_frames - 1, frame_num, dtype=int) + + # extract frames + frames = [] + for idx in frame_indices: + cap.set(cv2.CAP_PROP_POS_FRAMES, idx) + ret, frame = cap.read() + if ret: + frame_path = os.path.join(frames_dir, f"frame_{idx:04d}.png") + cv2.imwrite(frame_path, frame) + frames.append(frame_path) + + cap.release() + return frames + +# inference on a single video +def infer_video(video_path, model, device): + output_dir = "temp_video_frames" + frames = preprocess_video(video_path, output_dir) + + transform = transforms.Compose([ + transforms.Resize((256, 256)), + transforms.ToTensor(), + transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + ]) + + probs = [] + for frame_path in frames: + frame = Image.open(frame_path).convert("RGB") + frame = transform(frame).unsqueeze(0).to(device) + + data_dict = { + "image": frame, + "label": torch.tensor([0]).to(device), # Dummy label + "label_spe": torch.tensor([0]).to(device), # Dummy specific label + } + + with torch.no_grad(): + pred_dict = model(data_dict, inference=True) + + logits = pred_dict["cls"] # Shape: [batch_size, num_classes] + prob = torch.softmax(logits, dim=1)[:, 1].item() # Probability of being "fake" + probs.append(prob) + + avg_prob = np.mean(probs) + prediction = "Fake" if avg_prob > 0.5 else "Real" + return prediction, avg_prob + +# main function for terminal-based inference +def main(video_filename, model_name): 
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + config_path = f"/teamspace/studios/this_studio/DeepfakeBench/training/config/detector/{model_name}.yaml" + weights_path = f"/teamspace/studios/this_studio/DeepfakeBench/training/weights/{model_name}_best.pth" + + if not os.path.exists(config_path): + print(f"Error: Config file for model '{model_name}' not found at {config_path}.") + return + if not os.path.exists(weights_path): + print(f"Error: Weights file for model '{model_name}' not found at {weights_path}.") + return + + model = load_model(model_name, config_path, weights_path) + + video_path = os.path.join(os.getcwd(), video_filename) + if not os.path.exists(video_path): + print(f"Error: Video file '{video_filename}' not found in the current directory.") + return + + prediction, confidence = infer_video(video_path, model, device) + print(f"Model: {model_name}") + print(f"Prediction: {prediction} (Confidence: {confidence:.4f})") + + +if __name__ == "__main__": + import sys + if len(sys.argv) != 3: + print("Usage: python inference_script.py ") + print("Available models: xception, meso4, meso4Inception, efficientnetb4, ucf, etc.") + else: + video_filename = sys.argv[1] + model_name = sys.argv[2] + main(video_filename, model_name) \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..3d97b793724bc8ad2e5e520c6a2b1dbc62f37315 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,33 @@ +numpy==1.21.5 +pandas==1.4.2 +Pillow==9.0.1 +dlib==19.24.0 +imageio==2.9.0 +imgaug==0.4.0 +tqdm==4.61.0 +scipy==1.7.3 +seaborn==0.11.2 +pyyaml==6.0 +imutils==0.5.4 +opencv-python==4.6.0.66 +scikit-image==0.19.2 +scikit-learn==1.0.2 +albumentations==1.1.0 +torch==1.12.0 +torchvision==0.13.0 +torchaudio==0.12.0 +efficientnet-pytorch==0.7.1 +timm==0.6.12 +segmentation-models-pytorch==0.3.2 +torchtoolbox==0.1.8.2 +tensorboard==2.10.1 +setuptools==59.5.0 +loralib +einops +transformers +filterpy +simplejson +kornia +fvcore +imgaug==0.4.0 +git+https://github.com/openai/CLIP.git diff --git a/training/config/__init__.py b/training/config/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..676145d777810e4a51bdaf59fdec4f5358aae349 --- /dev/null +++ b/training/config/__init__.py @@ -0,0 +1,7 @@ +import os +import sys +current_file_path = os.path.abspath(__file__) +parent_dir = os.path.dirname(os.path.dirname(current_file_path)) +project_root_dir = os.path.dirname(parent_dir) +sys.path.append(parent_dir) +sys.path.append(project_root_dir) diff --git a/training/config/config/__init__.py b/training/config/config/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..379ba40bcdf97ea6dc4fca3dc8215da42b68cb31 --- /dev/null +++ b/training/config/config/__init__.py @@ -0,0 +1,7 @@ +import os +import sys +current_file_path = os.path.abspath(__file__) +parent_dir = os.path.dirname(os.path.dirname(current_file_path)) +project_root_dir = os.path.dirname(parent_dir) +sys.path.append(parent_dir) +sys.path.append(project_root_dir) diff --git a/training/config/config/backbone/cls_hrnet_w48.yaml b/training/config/config/backbone/cls_hrnet_w48.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b273cf5cada1c13e6b4c91c1f6b109c7dc0dd57a --- /dev/null +++ b/training/config/config/backbone/cls_hrnet_w48.yaml @@ -0,0 +1,103 @@ +CUDNN: + BENCHMARK: true + DETERMINISTIC: false + ENABLED: true +GPUS: (0,1,2,3) +OUTPUT_DIR: 'output' +LOG_DIR: 'log' +WORKERS: 4 
+PRINT_FREQ: 100 + +DATASET: + DATASET: lip + ROOT: 'data/' + TEST_SET: 'list/lip/valList.txt' + TRAIN_SET: 'list/lip/trainList.txt' + NUM_CLASSES: 20 +MODEL: + NAME: cls_hrnet + #IMAGE_SIZE: + # - 224 + # - 224 + EXTRA: + STAGE1: + NUM_MODULES: 1 + NUM_RANCHES: 1 + BLOCK: BOTTLENECK + NUM_BLOCKS: + - 4 + NUM_CHANNELS: + - 64 + FUSE_METHOD: SUM + STAGE2: + NUM_MODULES: 1 + NUM_BRANCHES: 2 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + NUM_CHANNELS: + - 48 + - 96 + FUSE_METHOD: SUM + STAGE3: + NUM_MODULES: 4 + NUM_BRANCHES: 3 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 48 + - 96 + - 192 + FUSE_METHOD: SUM + STAGE4: + NUM_MODULES: 3 + NUM_BRANCHES: 4 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 48 + - 96 + - 192 + - 384 + FUSE_METHOD: SUM +LOSS: + USE_OHEM: false + OHEMTHRES: 0.9 + OHEMKEEP: 131072 +TRAIN: + IMAGE_SIZE: + - 473 + - 473 + BASE_SIZE: 473 + BATCH_SIZE_PER_GPU: 10 + SHUFFLE: true + BEGIN_EPOCH: 0 + END_EPOCH: 150 + RESUME: true + OPTIMIZER: sgd + LR: 0.007 + WD: 0.0005 + MOMENTUM: 0.9 + NESTEROV: false + FLIP: true + MULTI_SCALE: true + DOWNSAMPLERATE: 1 + IGNORE_LABEL: 255 + SCALE_FACTOR: 11 +TEST: + IMAGE_SIZE: + - 473 + - 473 + BASE_SIZE: 473 + BATCH_SIZE_PER_GPU: 16 + NUM_SAMPLES: 2000 + FLIP_TEST: false + MULTI_SCALE: false diff --git a/training/config/config/detector/efficientnetb4.yaml b/training/config/config/detector/efficientnetb4.yaml new file mode 100644 index 0000000000000000000000000000000000000000..50c8cd22af21b87a8772c814bb204d028a792ad7 --- /dev/null +++ b/training/config/config/detector/efficientnetb4.yaml @@ -0,0 +1,88 @@ +# log dir +log_dir: logs/evaluations/effnb4 + +# model setting +# pretrained: /home/zhiyuanyan/disfin/deepfake_benchmark/training/pretrained/xception-b5690688.pth # path to a pre-trained model, if using one +pretrained: ./training/pretrained/efficientnet-b4-6ed6700e.pth # path to a pre-trained model, if using one +model_name: efficientnetb4 # model name +backbone_name: efficientnetb4 # backbone name + +#backbone setting +backbone_config: + num_classes: 2 + inc: 3 + dropout: false + mode: Original + +# dataset +all_dataset: [FaceForensics++, FF-F2F, FF-DF, FF-FS, FF-NT, FaceShifter, DeepFakeDetection, Celeb-DF-v1, Celeb-DF-v2, DFDCP, DFDC, DeeperForensics-1.0, UADFV] +train_dataset: [FF-NT] +test_dataset: [FaceForensics++, FF-F2F, FF-DF, FF-FS, FF-NT] + +compression: c23 # compression-level for videos +train_batchSize: 32 # training batch size +test_batchSize: 32 # test batch size +workers: 8 # number of data loading workers +frame_num: {'train': 32, 'test': 32} # number of frames to use per video in training and testing +resolution: 256 # resolution of output image to network +with_mask: false # whether to include mask information in the input +with_landmark: false # whether to include facial landmark information in the input +save_ckpt: true # whether to save checkpoint +save_feat: true # whether to save features + + +# data augmentation +use_data_augmentation: true # Add this flag to enable/disable data augmentation +data_aug: + flip_prob: 0.5 + rotate_prob: 0.5 + rotate_limit: [-10, 10] + blur_prob: 0.5 + blur_limit: [3, 7] + brightness_prob: 0.5 + brightness_limit: [-0.1, 0.1] + contrast_limit: [-0.1, 0.1] + quality_lower: 40 + quality_upper: 100 + +# mean and std for normalization +mean: [0.5, 0.5, 0.5] +std: [0.5, 0.5, 0.5] + +# optimizer config +optimizer: + # choose between 'adam' and 'sgd' + type: adam + adam: + lr: 0.0002 # learning rate + beta1: 0.9 # beta1 for Adam optimizer + beta2: 
0.999 # beta2 for Adam optimizer + eps: 0.00000001 # epsilon for Adam optimizer + weight_decay: 0.0005 # weight decay for regularization + amsgrad: false + sgd: + lr: 0.0002 # learning rate + momentum: 0.9 # momentum for SGD optimizer + weight_decay: 0.0005 # weight decay for regularization + +# training config +lr_scheduler: null # learning rate scheduler +nEpochs: 10 # number of epochs to train for +start_epoch: 0 # manual epoch number (useful for restarts) +save_epoch: 1 # interval epochs for saving models +rec_iter: 100 # interval iterations for recording +logdir: ./logs # folder to output images and logs +manualSeed: 1024 # manual seed for random number generation +save_ckpt: false # whether to save checkpoint + +# loss function +loss_func: cross_entropy # loss function to use +losstype: null + +# metric +metric_scoring: auc # metric for evaluation (auc, acc, eer, ap) + +# cuda + +cuda: true # whether to use CUDA acceleration +cudnn: true # whether to use CuDNN for convolution operations diff --git a/training/config/config/detector/resnet34.yaml b/training/config/config/detector/resnet34.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b3e92cf64e67dccbb75ec98838a6dfb3710631a3 --- /dev/null +++ b/training/config/config/detector/resnet34.yaml @@ -0,0 +1,87 @@ +# log dir +log_dir: /mntcephfs/lab_data/zhiyuanyan/benchmark_results/logs_final/resnet18 + +# model setting +pretrained: /home/zhiyuanyan/disfin/deepfake_benchmark/training/pretrained/resnet34-b627a593.pth # path to a pre-trained model, if using one +model_name: resnet34 # model name +backbone_name: resnet34 # backbone name + +#backbone setting +backbone_config: + num_classes: 2 + inc: 3 + dropout: false + mode: Original + +# dataset +all_dataset: [FaceForensics++, FF-F2F, FF-DF, FF-FS, FF-NT, FaceShifter, DeepFakeDetection, Celeb-DF-v1, Celeb-DF-v2, DFDCP, DFDC, DeeperForensics-1.0, UADFV] +train_dataset: [FF-NT] +test_dataset: [FaceForensics++, FF-F2F, FF-DF, FF-FS, FF-NT] + +compression: c23 # compression-level for videos +train_batchSize: 32 # training batch size +test_batchSize: 32 # test batch size +workers: 8 # number of data loading workers +frame_num: {'train': 32, 'test': 32} # number of frames to use per video in training and testing +resolution: 256 # resolution of output image to network +with_mask: false # whether to include mask information in the input +with_landmark: false # whether to include facial landmark information in the input +save_ckpt: true # whether to save checkpoint +save_feat: true # whether to save features + + +# data augmentation +use_data_augmentation: true # Add this flag to enable/disable data augmentation +data_aug: + flip_prob: 0.5 + rotate_prob: 0.5 + rotate_limit: [-10, 10] + blur_prob: 0.5 + blur_limit: [3, 7] + brightness_prob: 0.5 + brightness_limit: [-0.1, 0.1] + contrast_limit: [-0.1, 0.1] + quality_lower: 40 + quality_upper: 100 + +# mean and std for normalization +mean: [0.5, 0.5, 0.5] +std: [0.5, 0.5, 0.5] + +# optimizer config +optimizer: + # choose between 'adam' and 'sgd' + type: adam + adam: + lr: 0.0002 # learning rate + beta1: 0.9 # beta1 for Adam optimizer + beta2: 0.999 # beta2 for Adam optimizer + eps: 0.00000001 # epsilon for Adam optimizer + weight_decay: 0.0005 # weight decay for regularization + amsgrad: false + sgd: + lr: 0.0002 # learning rate + momentum: 0.9 # momentum for SGD optimizer + weight_decay: 0.0005 # weight decay for regularization + +# training config +lr_scheduler: null # learning rate scheduler +nEpochs: 10 # number of epochs to 
train for +start_epoch: 0 # manual epoch number (useful for restarts) +save_epoch: 1 # interval epochs for saving models +rec_iter: 100 # interval iterations for recording +logdir: ./logs # folder to output images and logs +manualSeed: 1024 # manual seed for random number generation +save_ckpt: false # whether to save checkpoint + +# loss function +loss_func: cross_entropy # loss function to use +losstype: null + +# metric +metric_scoring: auc # metric for evaluation (auc, acc, eer, ap) + +# cuda + +cuda: true # whether to use CUDA acceleration +cudnn: true # whether to use CuDNN for convolution operations diff --git a/training/config/config/detector/ucf.yaml b/training/config/config/detector/ucf.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2d368c4ffc5cb805aad50f27b2710af373e19859 --- /dev/null +++ b/training/config/config/detector/ucf.yaml @@ -0,0 +1,130 @@ +# log dir +log_dir: /data/home/zhiyuanyan/DeepfakeBench/debug_logs/ucf + +# model setting +pretrained: ./training/pretrained/xception-b5690688.pth # path to a pre-trained model, if using one +# pretrained: '/home/zhiyuanyan/.cache/torch/hub/checkpoints/resnet34-b627a593.pth' # path to a pre-trained model, if using one +model_name: ucf # model name +backbone_name: xception # backbone name +encoder_feat_dim: 512 # feature dimension of the backbone + +#backbone setting +backbone_config: + mode: adjust_channel + num_classes: 2 + inc: 3 + dropout: false + +# dataset +all_dataset: [FaceForensics++, FF-F2F, FF-DF, FF-FS, FF-NT, FaceShifter, DeepFakeDetection, Celeb-DF-v1, Celeb-DF-v2, DFDCP, DFDC, DeeperForensics-1.0, UADFV] +train_dataset: [FF-F2F, FF-DF, FF-FS, FF-NT,] +test_dataset: [Celeb-DF-v2] +dataset_type: pair + +compression: c23 # compression-level for videos +train_batchSize: 16 # training batch size +test_batchSize: 32 # test batch size +workers: 8 # number of data loading workers +frame_num: {'train': 32, 'test': 32} # number of frames to use per video in training and testing +resolution: 256 # resolution of output image to network +with_mask: false # whether to include mask information in the input +with_landmark: false # whether to include facial landmark information in the input +save_feat: true # whether to save features + +# label settings +label_dict: + # DFD + DFD_fake: 1 + DFD_real: 0 + FaceShifter: 1 + FF-FH: 1 + # FF++ + FaceShifter(FF-real+FF-FH) + # ucf specific label setting + FF-DF: 1 + FF-F2F: 2 + FF-FS: 3 + FF-NT: 4 + FF-real: 0 + # CelebDF + CelebDFv1_real: 0 + CelebDFv1_fake: 1 + CelebDFv2_real: 0 + CelebDFv2_fake: 1 + # DFDCP + DFDCP_Real: 0 + DFDCP_FakeA: 1 + DFDCP_FakeB: 1 + # DFDC + DFDC_Fake: 1 + DFDC_Real: 0 + # DeeperForensics-1.0 + DF_fake: 1 + DF_real: 0 + # UADFV + UADFV_Fake: 1 + UADFV_Real: 0 + # roop + roop_Fake: 1 + roop_Real: 0 + + + +# data augmentation +use_data_augmentation: true # Add this flag to enable/disable data augmentation +data_aug: + flip_prob: 0.5 + rotate_prob: 0.5 + rotate_limit: [-10, 10] + blur_prob: 0.5 + blur_limit: [3, 7] + brightness_prob: 0.5 + brightness_limit: [-0.1, 0.1] + contrast_limit: [-0.1, 0.1] + quality_lower: 40 + quality_upper: 100 + +# mean and std for normalization +mean: [0.5, 0.5, 0.5] +std: [0.5, 0.5, 0.5] + +# optimizer config +optimizer: + # choose between 'adam' and 'sgd' + type: adam + adam: + lr: 0.0002 # learning rate + beta1: 0.9 # beta1 for Adam optimizer + beta2: 0.999 # beta2 for Adam optimizer + eps: 0.00000001 # epsilon for Adam optimizer + weight_decay: 0.0005 # weight decay for regularization + amsgrad: false + 
sgd: + lr: 0.0002 # learning rate + momentum: 0.9 # momentum for SGD optimizer + weight_decay: 0.0005 # weight decay for regularization + +# training config +lr_scheduler: null # learning rate scheduler +nEpochs: 5 # number of epochs to train for +start_epoch: 0 # manual epoch number (useful for restarts) +save_epoch: 1 # interval epochs for saving models +rec_iter: 100 # interval iterations for recording +logdir: ./logs # folder to output images and logs +manualSeed: 1024 # manual seed for random number generation +save_ckpt: false # whether to save checkpoint + +# loss function +loss_func: + cls_loss: cross_entropy # loss function to use + spe_loss: cross_entropy + con_loss: contrastive_regularization + rec_loss: l1loss +losstype: null + +# metric +metric_scoring: auc # metric for evaluation (auc, acc, eer, ap) + +# cuda + +cuda: true # whether to use CUDA acceleration +cudnn: true # whether to use CuDNN for convolution operations diff --git a/training/config/config/detector/xception.yaml b/training/config/config/detector/xception.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9945947858e60c83d2b34d15f0d1a31d72459a6e --- /dev/null +++ b/training/config/config/detector/xception.yaml @@ -0,0 +1,86 @@ +# log dir +log_dir: /data/home/zhiyuanyan/DeepfakeBench/logs/testing_bench + +# model setting +pretrained: /teamspace/studios/this_studio/DeepfakeBench/training/pretrained/xception-b5690688.pth # path to a pre-trained model, if using one +model_name: xception # model name +backbone_name: xception # backbone name + +#backbone setting +backbone_config: + mode: original + num_classes: 2 + inc: 3 + dropout: false + +# dataset +all_dataset: [FaceForensics++, FF-F2F, FF-DF, FF-FS, FF-NT, FaceShifter, DeepFakeDetection, Celeb-DF-v1, Celeb-DF-v2, DFDCP, DFDC, DeeperForensics-1.0, UADFV] +train_dataset: [Celeb-DF-v1, DFDCP, UADFV] +test_dataset: [Celeb-DF-v1, DFDCP, UADFV] + +compression: c23 # compression-level for videos +train_batchSize: 32 # training batch size +test_batchSize: 32 # test batch size +workers: 8 # number of data loading workers +frame_num: {'train': 32, 'test': 32} # number of frames to use per video in training and testing +resolution: 256 # resolution of output image to network +with_mask: false # whether to include mask information in the input +with_landmark: false # whether to include facial landmark information in the input + + +# data augmentation +use_data_augmentation: true # Add this flag to enable/disable data augmentation +data_aug: + flip_prob: 0.5 + rotate_prob: 0.0 + rotate_limit: [-10, 10] + blur_prob: 0.5 + blur_limit: [3, 7] + brightness_prob: 0.5 + brightness_limit: [-0.1, 0.1] + contrast_limit: [-0.1, 0.1] + quality_lower: 40 + quality_upper: 100 + +# mean and std for normalization +mean: [0.5, 0.5, 0.5] +std: [0.5, 0.5, 0.5] + +# optimizer config +optimizer: + # choose between 'adam' and 'sgd' + type: adam + adam: + lr: 0.0002 # learning rate + beta1: 0.9 # beta1 for Adam optimizer + beta2: 0.999 # beta2 for Adam optimizer + eps: 0.00000001 # epsilon for Adam optimizer + weight_decay: 0.0005 # weight decay for regularization + amsgrad: false + sgd: + lr: 0.0002 # learning rate + momentum: 0.9 # momentum for SGD optimizer + weight_decay: 0.0005 # weight decay for regularization + +# training config +lr_scheduler: null # learning rate scheduler +nEpochs: 10 # number of epochs to train for +start_epoch: 0 # manual epoch number (useful for restarts) +save_epoch: 1 # interval epochs for saving models +rec_iter: 100 # interval iterations for 
recording +logdir: ./logs # folder to output images and logs +manualSeed: 1024 # manual seed for random number generation +save_ckpt: true # whether to save checkpoint +save_feat: true # whether to save features + +# loss function +loss_func: cross_entropy # loss function to use +losstype: null + +# metric +metric_scoring: auc # metric for evaluation (auc, acc, eer, ap) + +# cuda + +cuda: true # whether to use CUDA acceleration +cudnn: true # whether to use CuDNN for convolution operations diff --git a/training/config/config/test_config.yaml b/training/config/config/test_config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..88e4457aac9081a7ebab44344401310e7a0f0d51 --- /dev/null +++ b/training/config/config/test_config.yaml @@ -0,0 +1,38 @@ +mode: test +lmdb: False +dataset_root_rgb: './datasets' +lmdb_dir: 'I:\transform_2_lmdb' +dataset_json_folder: '/teamspace/studios/this_studio/DeepfakeBench/preprocessing/dataset_json' +label_dict: + # DFD + DFD_fake: 1 + DFD_real: 0 + # FF++ + FaceShifter(FF-real+FF-FH) + FF-SH: 1 + FF-F2F: 1 + FF-DF: 1 + FF-FS: 1 + FF-NT: 1 + FF-FH: 1 + FF-real: 0 + # CelebDF + CelebDFv1_real: 0 + CelebDFv1_fake: 1 + CelebDFv2_real: 0 + CelebDFv2_fake: 1 + # DFDCP + DFDCP_Real: 0 + DFDCP_FakeA: 1 + DFDCP_FakeB: 1 + # DFDC + DFDC_Fake: 1 + DFDC_Real: 0 + # DeeperForensics-1.0 + DF_fake: 1 + DF_real: 0 + # UADFV + UADFV_Fake: 1 + UADFV_Real: 0 + # Roop + roop_Real: 0 + roop_Fake: 1 \ No newline at end of file diff --git a/training/config/config/train_config.yaml b/training/config/config/train_config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9f2d4bd5ac44ac83b4115b4e4484f2a484e28e1f --- /dev/null +++ b/training/config/config/train_config.yaml @@ -0,0 +1,43 @@ +mode: train +lmdb: False +dry_run: false +dataset_root_rgb: './datasets' +lmdb_dir: 'I:\transform_2_lmdb' +dataset_json_folder: '/teamspace/studios/this_studio/DeepfakeBench/preprocessing/dataset_json' +SWA: False +save_avg: True +log_dir: ./logs/training/ +# label settings +label_dict: + # DFD + DFD_fake: 1 + DFD_real: 0 + # FF++ + FaceShifter(FF-real+FF-FH) + FF-SH: 1 + FF-F2F: 1 + FF-DF: 1 + FF-FS: 1 + FF-NT: 1 + FF-FH: 1 + FF-real: 0 + # CelebDF + CelebDFv1_real: 0 + CelebDFv1_fake: 1 + CelebDFv2_real: 0 + CelebDFv2_fake: 1 + # DFDCP + DFDCP_Real: 0 + DFDCP_FakeA: 1 + DFDCP_FakeB: 1 + # DFDC + DFDC_Fake: 1 + DFDC_Real: 0 + # DeeperForensics-1.0 + DF_fake: 1 + DF_real: 0 + # UADFV + UADFV_Fake: 1 + UADFV_Real: 0 + # Roop + roop_Real: 0 + roop_Fake: 1 \ No newline at end of file diff --git a/training/config/detector/efficientnetb4.yaml b/training/config/detector/efficientnetb4.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6be616ebe624f991ad0151b91ca6c5ff53493d51 --- /dev/null +++ b/training/config/detector/efficientnetb4.yaml @@ -0,0 +1,88 @@ +# log dir +log_dir: logs/evaluations/effnb4 + +# model setting +# pretrained: /home/zhiyuanyan/disfin/deepfake_benchmark/training/pretrained/xception-b5690688.pth # path to a pre-trained model, if using one +pretrained: ./training/pretrained/efficientnet-b4-6ed6700e.pth # path to a pre-trained model, if using one +model_name: efficientnetb4 # model name +backbone_name: efficientnetb4 # backbone name + +#backbone setting +backbone_config: + num_classes: 2 + inc: 3 + dropout: false + mode: Original + +# dataset +all_dataset: [FaceForensics++, FF-F2F, FF-DF, FF-FS, FF-NT, FaceShifter, DeepFakeDetection, Celeb-DF-v1, Celeb-DF-v2, DFDCP, DFDC, DeeperForensics-1.0, UADFV] +train_dataset: [FF-NT] 
+test_dataset: [FaceForensics++, FF-F2F, FF-DF, FF-FS, FF-NT] + +compression: c23 # compression-level for videos +train_batchSize: 32 # training batch size +test_batchSize: 32 # test batch size +workers: 8 # number of data loading workers +frame_num: {'train': 32, 'test': 32} # number of frames to use per video in training and testing +resolution: 256 # resolution of output image to network +with_mask: false # whether to include mask information in the input +with_landmark: false # whether to include facial landmark information in the input +save_ckpt: true # whether to save checkpoint +save_feat: true # whether to save features + + +# data augmentation +use_data_augmentation: true # Add this flag to enable/disable data augmentation +data_aug: + flip_prob: 0.5 + rotate_prob: 0.5 + rotate_limit: [-10, 10] + blur_prob: 0.5 + blur_limit: [3, 7] + brightness_prob: 0.5 + brightness_limit: [-0.1, 0.1] + contrast_limit: [-0.1, 0.1] + quality_lower: 40 + quality_upper: 100 + +# mean and std for normalization +mean: [0.5, 0.5, 0.5] +std: [0.5, 0.5, 0.5] + +# optimizer config +optimizer: + # choose between 'adam' and 'sgd' + type: adam + adam: + lr: 0.0002 # learning rate + beta1: 0.9 # beta1 for Adam optimizer + beta2: 0.999 # beta2 for Adam optimizer + eps: 0.00000001 # epsilon for Adam optimizer + weight_decay: 0.0005 # weight decay for regularization + amsgrad: false + sgd: + lr: 0.0002 # learning rate + momentum: 0.9 # momentum for SGD optimizer + weight_decay: 0.0005 # weight decay for regularization + +# training config +lr_scheduler: null # learning rate scheduler +nEpochs: 10 # number of epochs to train for +start_epoch: 0 # manual epoch number (useful for restarts) +save_epoch: 1 # interval epochs for saving models +rec_iter: 100 # interval iterations for recording +logdir: ./logs # folder to output images and logs +manualSeed: 1024 # manual seed for random number generation +save_ckpt: false # whether to save checkpoint + +# loss function +loss_func: cross_entropy # loss function to use +losstype: null + +# metric +metric_scoring: auc # metric for evaluation (auc, acc, eer, ap) + +# cuda + +cuda: true # whether to use CUDA acceleration +cudnn: true # whether to use CuDNN for convolution operations diff --git a/training/config/detector/ucf.yaml b/training/config/detector/ucf.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4db4c43a7c680f80d92fa615f3ad71f0d3cba930 --- /dev/null +++ b/training/config/detector/ucf.yaml @@ -0,0 +1,131 @@ +# log dir +log_dir: /data/home/zhiyuanyan/DeepfakeBench/debug_logs/ucf + +# model setting +pretrained: /teamspace/studios/this_studio/DeepfakeBench/training/pretrained/xception-b5690688.pth # path to a pre-trained model, if using one +# pretrained: '/home/zhiyuanyan/.cache/torch/hub/checkpoints/resnet34-b627a593.pth' # path to a pre-trained model, if using one +model_name: ucf # model name +backbone_name: xception # backbone name +encoder_feat_dim: 512 # feature dimension of the backbone + +#backbone setting +backbone_config: + mode: adjust_channel + num_classes: 2 + inc: 3 + dropout: false + +# dataset +all_dataset: [FaceForensics++, FF-F2F, FF-DF, FF-FS, FF-NT, FaceShifter, DeepFakeDetection, Celeb-DF-v1, Celeb-DF-v2, DFDCP, DFDC, DeeperForensics-1.0, UADFV] +train_dataset: [FF-F2F, FF-DF, FF-FS, FF-NT,] +test_dataset: [Celeb-DF-v2] +dataset_type: pair + +compression: c23 # compression-level for videos +train_batchSize: 16 # training batch size +test_batchSize: 32 # test batch size +workers: 8 # number of data loading workers 
+frame_num: {'train': 32, 'test': 32} # number of frames to use per video in training and testing +resolution: 256 # resolution of output image to network +with_mask: false # whether to include mask information in the input +with_landmark: false # whether to include facial landmark information in the input +save_ckpt: true # whether to save checkpoint +save_feat: true # whether to save features + +# label settings +label_dict: + # DFD + DFD_fake: 1 + DFD_real: 0 + FaceShifter: 1 + FF-FH: 1 + # FF++ + FaceShifter(FF-real+FF-FH) + # ucf specific label setting + FF-DF: 1 + FF-F2F: 2 + FF-FS: 3 + FF-NT: 4 + FF-real: 0 + # CelebDF + CelebDFv1_real: 0 + CelebDFv1_fake: 1 + CelebDFv2_real: 0 + CelebDFv2_fake: 1 + # DFDCP + DFDCP_Real: 0 + DFDCP_FakeA: 1 + DFDCP_FakeB: 1 + # DFDC + DFDC_Fake: 1 + DFDC_Real: 0 + # DeeperForensics-1.0 + DF_fake: 1 + DF_real: 0 + # UADFV + UADFV_Fake: 1 + UADFV_Real: 0 + # roop + roop_Fake: 1 + roop_Real: 0 + + + +# data augmentation +use_data_augmentation: true # Add this flag to enable/disable data augmentation +data_aug: + flip_prob: 0.5 + rotate_prob: 0.5 + rotate_limit: [-10, 10] + blur_prob: 0.5 + blur_limit: [3, 7] + brightness_prob: 0.5 + brightness_limit: [-0.1, 0.1] + contrast_limit: [-0.1, 0.1] + quality_lower: 40 + quality_upper: 100 + +# mean and std for normalization +mean: [0.5, 0.5, 0.5] +std: [0.5, 0.5, 0.5] + +# optimizer config +optimizer: + # choose between 'adam' and 'sgd' + type: adam + adam: + lr: 0.0002 # learning rate + beta1: 0.9 # beta1 for Adam optimizer + beta2: 0.999 # beta2 for Adam optimizer + eps: 0.00000001 # epsilon for Adam optimizer + weight_decay: 0.0005 # weight decay for regularization + amsgrad: false + sgd: + lr: 0.0002 # learning rate + momentum: 0.9 # momentum for SGD optimizer + weight_decay: 0.0005 # weight decay for regularization + +# training config +lr_scheduler: null # learning rate scheduler +nEpochs: 5 # number of epochs to train for +start_epoch: 0 # manual epoch number (useful for restarts) +save_epoch: 1 # interval epochs for saving models +rec_iter: 100 # interval iterations for recording +logdir: ./logs # folder to output images and logs +manualSeed: 1024 # manual seed for random number generation +save_ckpt: false # whether to save checkpoint + +# loss function +loss_func: + cls_loss: cross_entropy # loss function to use + spe_loss: cross_entropy + con_loss: contrastive_regularization + rec_loss: l1loss +losstype: null + +# metric +metric_scoring: auc # metric for evaluation (auc, acc, eer, ap) + +# cuda + +cuda: true # whether to use CUDA acceleration +cudnn: true # whether to use CuDNN for convolution operations diff --git a/training/config/detector/xception.yaml b/training/config/detector/xception.yaml new file mode 100644 index 0000000000000000000000000000000000000000..56ddcab7fe4ccb0e2a37a6fe22585f28a73df4ee --- /dev/null +++ b/training/config/detector/xception.yaml @@ -0,0 +1,86 @@ +# log dir +log_dir: /teamspace/studios/this_studio/DeepfakeBench/logs/testing_bench + +# model setting +pretrained: /teamspace/studios/this_studio/DeepfakeBench/training/pretrained/xception-b5690688.pth # path to a pre-trained model, if using one +model_name: xception # model name +backbone_name: xception # backbone name + +#backbone setting +backbone_config: + mode: original + num_classes: 2 + inc: 3 + dropout: false + +# dataset +all_dataset: [FaceForensics++, FF-F2F, FF-DF, FF-FS, FF-NT, FaceShifter, DeepFakeDetection, Celeb-DF-v1, Celeb-DF-v2, DFDCP, DFDC, DeeperForensics-1.0, UADFV] +train_dataset: [Celeb-DF-v1, DFDCP] 
+test_dataset: [UADFV] + +compression: c23 # compression-level for videos +train_batchSize: 32 # training batch size +test_batchSize: 32 # test batch size +workers: 8 # number of data loading workers +frame_num: {'train': 32, 'test': 32} # number of frames to use per video in training and testing +resolution: 256 # resolution of output image to network +with_mask: false # whether to include mask information in the input +with_landmark: false # whether to include facial landmark information in the input + + +# data augmentation +use_data_augmentation: true # Add this flag to enable/disable data augmentation +data_aug: + flip_prob: 0.5 + rotate_prob: 0.0 + rotate_limit: [-10, 10] + blur_prob: 0.5 + blur_limit: [3, 7] + brightness_prob: 0.5 + brightness_limit: [-0.1, 0.1] + contrast_limit: [-0.1, 0.1] + quality_lower: 40 + quality_upper: 100 + +# mean and std for normalization +mean: [0.5, 0.5, 0.5] +std: [0.5, 0.5, 0.5] + +# optimizer config +optimizer: + # choose between 'adam' and 'sgd' + type: adam + adam: + lr: 0.0002 # learning rate + beta1: 0.9 # beta1 for Adam optimizer + beta2: 0.999 # beta2 for Adam optimizer + eps: 0.00000001 # epsilon for Adam optimizer + weight_decay: 0.0005 # weight decay for regularization + amsgrad: false + sgd: + lr: 0.0002 # learning rate + momentum: 0.9 # momentum for SGD optimizer + weight_decay: 0.0005 # weight decay for regularization + +# training config +lr_scheduler: null # learning rate scheduler +nEpochs: 10 # number of epochs to train for +start_epoch: 0 # manual epoch number (useful for restarts) +save_epoch: 1 # interval epochs for saving models +rec_iter: 100 # interval iterations for recording +logdir: ./logs # folder to output images and logs +manualSeed: 1024 # manual seed for random number generation +save_ckpt: true # whether to save checkpoint +save_feat: true # whether to save features + +# loss function +loss_func: cross_entropy # loss function to use +losstype: null + +# metric +metric_scoring: auc # metric for evaluation (auc, acc, eer, ap) + +# cuda + +cuda: true # whether to use CUDA acceleration +cudnn: true # whether to use CuDNN for convolution operations diff --git a/training/config/test_config.yaml b/training/config/test_config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..eedc83afc51b2a6db84d30a578e5cf8cb87088fb --- /dev/null +++ b/training/config/test_config.yaml @@ -0,0 +1,38 @@ +mode: test +lmdb: False +rgb_dir: '' +lmdb_dir: './datasets/lmdb' +dataset_json_folder: './preprocessing/dataset_json' +label_dict: + # DFD + DFD_fake: 1 + DFD_real: 0 + # FF++ + FaceShifter(FF-real+FF-FH) + FF-SH: 1 + FF-F2F: 1 + FF-DF: 1 + FF-FS: 1 + FF-NT: 1 + FF-FH: 1 + FF-real: 0 + # CelebDF + CelebDFv1_real: 0 + CelebDFv1_fake: 1 + CelebDFv2_real: 0 + CelebDFv2_fake: 1 + # DFDCP + DFDCP_Real: 0 + DFDCP_FakeA: 1 + DFDCP_FakeB: 1 + # DFDC + DFDC_Fake: 1 + DFDC_Real: 0 + # DeeperForensics-1.0 + DF_fake: 1 + DF_real: 0 + # UADFV + UADFV_Fake: 1 + UADFV_Real: 0 + # Roop + roop_Real: 0 + roop_Fake: 1 \ No newline at end of file diff --git a/training/config/train_config.yaml b/training/config/train_config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..73e24749329001a61e155db545d18b12a45762b5 --- /dev/null +++ b/training/config/train_config.yaml @@ -0,0 +1,43 @@ +mode: train +lmdb: False +dry_run: false +rgb_dir: '' +lmdb_dir: './datasets/lmdb' +dataset_json_folder: './preprocessing/dataset_json' +SWA: False +save_avg: True +log_dir: ./logs/training/ +# label settings +label_dict: + # DFD + 
DFD_fake: 1 + DFD_real: 0 + # FF++ + FaceShifter(FF-real+FF-FH) + FF-SH: 1 + FF-F2F: 1 + FF-DF: 1 + FF-FS: 1 + FF-NT: 1 + FF-FH: 1 + FF-real: 0 + # CelebDF + CelebDFv1_real: 0 + CelebDFv1_fake: 1 + CelebDFv2_real: 0 + CelebDFv2_fake: 1 + # DFDCP + DFDCP_Real: 0 + DFDCP_FakeA: 1 + DFDCP_FakeB: 1 + # DFDC + DFDC_Fake: 1 + DFDC_Real: 0 + # DeeperForensics-1.0 + DF_fake: 1 + DF_real: 0 + # UADFV + UADFV_Fake: 1 + UADFV_Real: 0 + # Roop + roop_Real: 0 + roop_Fake: 1 \ No newline at end of file diff --git a/training/dataset/I2G_dataset.py b/training/dataset/I2G_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..7bfa7c0a4da4d015678ab682895acbc827358dbb --- /dev/null +++ b/training/dataset/I2G_dataset.py @@ -0,0 +1,389 @@ +# Created by: Kaede Shiohara +# Yamasaki Lab at The University of Tokyo +# shiohara@cvm.t.u-tokyo.ac.jp +# Copyright (c) 2021 +# 3rd party softwares' licenses are noticed at https://github.com/mapooon/SelfBlendedImages/blob/master/LICENSE +import logging +import os +import pickle + +import cv2 +import numpy as np +import scipy as sp +import yaml +from skimage.measure import label, regionprops +import random +from PIL import Image +import sys +import albumentations as A +from torch.utils.data import DataLoader +from dataset.utils.bi_online_generation import random_get_hull +from dataset.abstract_dataset import DeepfakeAbstractBaseDataset +from dataset.pair_dataset import pairDataset +import torch + +class RandomDownScale(A.core.transforms_interface.ImageOnlyTransform): + def apply(self, img, ratio_list=None, **params): + if ratio_list is None: + ratio_list = [2, 4] + r = ratio_list[np.random.randint(len(ratio_list))] + return self.randomdownscale(img, r) + + def randomdownscale(self, img, r): + keep_ratio = True + keep_input_shape = True + H, W, C = img.shape + + img_ds = cv2.resize(img, (int(W / r), int(H / r)), interpolation=cv2.INTER_NEAREST) + if keep_input_shape: + img_ds = cv2.resize(img_ds, (W, H), interpolation=cv2.INTER_LINEAR) + + return img_ds + + +''' +from PIL import ImageDraw +# 创建一个可以在图像上绘制的对象 +img_pil=Image.fromarray(img) +draw = ImageDraw.Draw(img_pil) + +# 在图像上绘制点 +for i, point in enumerate(landmark): + x, y = point + radius = 1 # 点的半径 + draw.ellipse((x-radius, y-radius, x+radius, y+radius), fill="red") + draw.text((x+radius+2, y-radius), str(i), fill="black") # 在点旁边添加标签 +img_pil.show() + +''' + +def alpha_blend(source, target, mask): + mask_blured = get_blend_mask(mask) + img_blended = (mask_blured * source + (1 - mask_blured) * target) + return img_blended, mask_blured + + +def dynamic_blend(source, target, mask): + mask_blured = get_blend_mask(mask) + # worth consideration, 1 in the official paper, 0.25, 0.5, 0.75,1,1,1 in sbi. 
+ blend_list = [1, 1, 1] + blend_ratio = blend_list[np.random.randint(len(blend_list))] + mask_blured *= blend_ratio + img_blended = (mask_blured * source + (1 - mask_blured) * target) + return img_blended, mask_blured + + +def get_blend_mask(mask): + H, W = mask.shape + size_h = np.random.randint(192, 257) + size_w = np.random.randint(192, 257) + mask = cv2.resize(mask, (size_w, size_h)) + kernel_1 = random.randrange(5, 26, 2) + kernel_1 = (kernel_1, kernel_1) + kernel_2 = random.randrange(5, 26, 2) + kernel_2 = (kernel_2, kernel_2) + + mask_blured = cv2.GaussianBlur(mask, kernel_1, 0) + mask_blured = mask_blured / (mask_blured.max()) + mask_blured[mask_blured < 1] = 0 + + mask_blured = cv2.GaussianBlur(mask_blured, kernel_2, np.random.randint(5, 46)) + mask_blured = mask_blured / (mask_blured.max()) + mask_blured = cv2.resize(mask_blured, (W, H)) + return mask_blured.reshape((mask_blured.shape + (1,))) + + +def get_alpha_blend_mask(mask): + kernel_list = [(11, 11), (9, 9), (7, 7), (5, 5), (3, 3)] + blend_list = [0.25, 0.5, 0.75] + kernel_idxs = random.choices(range(len(kernel_list)), k=2) + blend_ratio = blend_list[random.sample(range(len(blend_list)), 1)[0]] + mask_blured = cv2.GaussianBlur(mask, kernel_list[0], 0) + # print(mask_blured.max()) + mask_blured[mask_blured < mask_blured.max()] = 0 + mask_blured[mask_blured > 0] = 1 + # mask_blured = mask + mask_blured = cv2.GaussianBlur(mask_blured, kernel_list[kernel_idxs[1]], 0) + mask_blured = mask_blured / (mask_blured.max()) + return mask_blured.reshape((mask_blured.shape + (1,))) + + +class I2GDataset(DeepfakeAbstractBaseDataset): + def __init__(self, config=None, mode='train'): + #config['GridShuffle']['p'] = 0 + super().__init__(config, mode) + real_images_list = [img for img, label in zip(self.image_list, self.label_list) if label == 0] + self.real_images_list = list(set(real_images_list)) # de-duplicate since DF,F2F,FS,NT have same real images + self.source_transforms = self.get_source_transforms() + self.transforms = self.get_transforms() + self.init_nearest() + + def init_nearest(self): + if os.path.exists('training/lib/nearest_face_info.pkl'): + with open('training/lib/nearest_face_info.pkl', 'rb') as f: + face_info = pickle.load(f) + self.face_info = face_info + # Check if the dictionary has already been created + if os.path.exists('training/lib/landmark_dict_ffall.pkl'): + with open('training/lib/landmark_dict_ffall.pkl', 'rb') as f: + landmark_dict = pickle.load(f) + self.landmark_dict = landmark_dict + + def reorder_landmark(self, landmark): + landmark = landmark.copy() # 创建landmark的副本 + landmark_add = np.zeros((13, 2)) + for idx, idx_l in enumerate([77, 75, 76, 68, 69, 70, 71, 80, 72, 73, 79, 74, 78]): + landmark_add[idx] = landmark[idx_l] + landmark[68:] = landmark_add + return landmark + + def hflip(self, img, mask=None, landmark=None, bbox=None): + H, W = img.shape[:2] + landmark = landmark.copy() + if bbox is not None: + bbox = bbox.copy() + + if landmark is not None: + landmark_new = np.zeros_like(landmark) + + landmark_new[:17] = landmark[:17][::-1] + landmark_new[17:27] = landmark[17:27][::-1] + + landmark_new[27:31] = landmark[27:31] + landmark_new[31:36] = landmark[31:36][::-1] + + landmark_new[36:40] = landmark[42:46][::-1] + landmark_new[40:42] = landmark[46:48][::-1] + + landmark_new[42:46] = landmark[36:40][::-1] + landmark_new[46:48] = landmark[40:42][::-1] + + landmark_new[48:55] = landmark[48:55][::-1] + landmark_new[55:60] = landmark[55:60][::-1] + + landmark_new[60:65] = landmark[60:65][::-1] + 
landmark_new[65:68] = landmark[65:68][::-1] + if len(landmark) == 68: + pass + elif len(landmark) == 81: + landmark_new[68:81] = landmark[68:81][::-1] + else: + raise NotImplementedError + landmark_new[:, 0] = W - landmark_new[:, 0] + + else: + landmark_new = None + + if bbox is not None: + bbox_new = np.zeros_like(bbox) + bbox_new[0, 0] = bbox[1, 0] + bbox_new[1, 0] = bbox[0, 0] + bbox_new[:, 0] = W - bbox_new[:, 0] + bbox_new[:, 1] = bbox[:, 1].copy() + if len(bbox) > 2: + bbox_new[2, 0] = W - bbox[3, 0] + bbox_new[2, 1] = bbox[3, 1] + bbox_new[3, 0] = W - bbox[2, 0] + bbox_new[3, 1] = bbox[2, 1] + bbox_new[4, 0] = W - bbox[4, 0] + bbox_new[4, 1] = bbox[4, 1] + bbox_new[5, 0] = W - bbox[6, 0] + bbox_new[5, 1] = bbox[6, 1] + bbox_new[6, 0] = W - bbox[5, 0] + bbox_new[6, 1] = bbox[5, 1] + else: + bbox_new = None + + if mask is not None: + mask = mask[:, ::-1] + else: + mask = None + img = img[:, ::-1].copy() + return img, mask, landmark_new, bbox_new + + + + def get_source_transforms(self): + return A.Compose([ + A.Compose([ + A.RGBShift((-20, 20), (-20, 20), (-20, 20), p=0.3), + A.HueSaturationValue(hue_shift_limit=(-0.3, 0.3), sat_shift_limit=(-0.3, 0.3), + val_shift_limit=(-0.3, 0.3), p=1), + A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1), contrast_limit=(-0.1, 0.1), p=1), + ], p=1), + + A.OneOf([ + RandomDownScale(p=1), + A.Sharpen(alpha=(0.2, 0.5), lightness=(0.5, 1.0), p=1), + ], p=1), + + ], p=1.) + + def get_fg_bg(self, one_lmk_path): + """ + Get foreground and background paths + """ + bg_lmk_path = one_lmk_path + # Randomly pick one from the nearest neighbors for the foreground + if bg_lmk_path in self.face_info: + fg_lmk_path = random.choice(self.face_info[bg_lmk_path]) + else: + fg_lmk_path = bg_lmk_path + return fg_lmk_path, bg_lmk_path + + def get_transforms(self): + return A.Compose([ + + A.RGBShift((-20, 20), (-20, 20), (-20, 20), p=0.3), + A.HueSaturationValue(hue_shift_limit=(-0.3, 0.3), sat_shift_limit=(-0.3, 0.3), + val_shift_limit=(-0.3, 0.3), p=0.3), + A.RandomBrightnessContrast(brightness_limit=(-0.3, 0.3), contrast_limit=(-0.3, 0.3), p=0.3), + A.ImageCompression(quality_lower=40, quality_upper=100, p=0.5), + + ], + additional_targets={f'image1': 'image'}, + p=1.) 
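The Compose above registers a second image input through additional_targets={'image1': 'image'}, which makes albumentations apply the same sampled augmentation parameters to both the blended (fake) crop and the untouched (real) crop. A minimal self-contained illustration of that mechanism, with dummy arrays standing in for the two crops:

import numpy as np
import albumentations as A

# 'image1' is declared to be of type 'image', so it receives exactly the same
# transform, with the same sampled parameters, as the primary 'image' argument.
aug = A.Compose(
    [A.HorizontalFlip(p=1.0)],
    additional_targets={'image1': 'image'},
)

fake_crop = np.zeros((256, 256, 3), dtype=np.uint8)  # dummy stand-in for img_f
real_crop = np.ones((256, 256, 3), dtype=np.uint8)   # dummy stand-in for img_r
out = aug(image=fake_crop, image1=real_crop)
img_f, img_r = out['image'], out['image1']           # both flipped identically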
+ + def randaffine(self, img, mask): + f = A.Affine( + translate_percent={'x': (-0.03, 0.03), 'y': (-0.015, 0.015)}, + scale=[0.95, 1 / 0.95], + fit_output=False, + p=1) + + g = A.ElasticTransform( + alpha=50, + sigma=7, + alpha_affine=0, + p=1, + ) + + transformed = f(image=img, mask=mask) + img = transformed['image'] + + mask = transformed['mask'] + transformed = g(image=img, mask=mask) + mask = transformed['mask'] + return img, mask + + def __len__(self): + return len(self.real_images_list) + + + def colorTransfer(self, src, dst, mask): + transferredDst = np.copy(dst) + maskIndices = np.where(mask != 0) + maskedSrc = src[maskIndices[0], maskIndices[1]].astype(np.float32) + maskedDst = dst[maskIndices[0], maskIndices[1]].astype(np.float32) + + # Compute means and standard deviations + meanSrc = np.mean(maskedSrc, axis=0) + stdSrc = np.std(maskedSrc, axis=0) + meanDst = np.mean(maskedDst, axis=0) + stdDst = np.std(maskedDst, axis=0) + + # Perform color transfer + maskedDst = (maskedDst - meanDst) * (stdSrc / stdDst) + meanSrc + maskedDst = np.clip(maskedDst, 0, 255) + + # Copy the entire background into transferredDst + transferredDst = np.copy(dst) + # Now apply color transfer only to the masked region + transferredDst[maskIndices[0], maskIndices[1]] = maskedDst.astype(np.uint8) + + return transferredDst + + + + def two_blending(self, img_bg, img_fg, landmark): + H, W = len(img_bg), len(img_bg[0]) + if np.random.rand() < 0.25: + landmark = landmark[:68] + logging.disable(logging.FATAL) + mask = random_get_hull(landmark, img_bg) + logging.disable(logging.NOTSET) + source = img_fg.copy() + target = img_bg.copy() + # if np.random.rand() < 0.5: + # source = self.source_transforms(image=source.astype(np.uint8))['image'] + # else: + # target = self.source_transforms(image=target.astype(np.uint8))['image'] + source_v2, mask_v2 = self.randaffine(source, mask) + source_v3=self.colorTransfer(target,source_v2,mask_v2) + img_blended, mask = dynamic_blend(source_v3, target, mask_v2) + img_blended = img_blended.astype(np.uint8) + img = img_bg.astype(np.uint8) + + return img, img_blended, mask.squeeze(2) + + + def __getitem__(self, index): + image_path_bg = self.real_images_list[index] + label = 0 + + # Get the mask and landmark paths + landmark_path_bg = image_path_bg.replace('frames', 'landmarks').replace('.png', '.npy') # Use .npy for landmark + landmark_path_fg, landmark_path_bg = self.get_fg_bg(landmark_path_bg) + image_path_fg = landmark_path_fg.replace('landmarks','frames').replace('.npy','.png') + try: + image_bg = self.load_rgb(image_path_bg) + image_fg = self.load_rgb(image_path_fg) + except Exception as e: + # Skip this image and return the first one + print(f"Error loading image at index {index}: {e}") + return self.__getitem__(0) + image_bg = np.array(image_bg) # Convert to numpy array for data augmentation + image_fg = np.array(image_fg) # Convert to numpy array for data augmentation + + landmarks_bg = self.load_landmark(landmark_path_bg) + landmarks_fg = self.load_landmark(landmark_path_fg) + + + landmarks_bg = np.clip(landmarks_bg, 0, self.config['resolution'] - 1) + landmarks_bg = self.reorder_landmark(landmarks_bg) + + img_r, img_f, mask_f = self.two_blending(image_bg.copy(), image_fg.copy(),landmarks_bg.copy()) + transformed = self.transforms(image=img_f.astype('uint8'), image1=img_r.astype('uint8')) + img_f = transformed['image'] + img_r = transformed['image1'] + # img_f = img_f.transpose((2, 0, 1)) + # img_r = img_r.transpose((2, 0, 1)) + img_f = 
self.normalize(self.to_tensor(img_f)) + img_r = self.normalize(self.to_tensor(img_r)) + mask_f = self.to_tensor(mask_f) + mask_r=torch.zeros_like(mask_f) # zeros or ones + return img_f, img_r, mask_f,mask_r + + @staticmethod + def collate_fn(batch): + img_f, img_r, mask_f,mask_r = zip(*batch) + data = {} + fake_mask = torch.stack(mask_f,dim=0) + real_mask = torch.stack(mask_r, dim=0) + fake_images = torch.stack(img_f, dim=0) + real_images = torch.stack(img_r, dim=0) + data['image'] = torch.cat([real_images, fake_images], dim=0) + data['label'] = torch.tensor([0] * len(img_r) + [1] * len(img_f)) + data['landmark'] = None + data['mask'] = torch.cat([real_mask, fake_mask], dim=0) + return data + + +if __name__ == '__main__': + detector_path = r"./training/config/detector/xception.yaml" + # weights_path = "./ckpts/xception/CDFv2/tb_v1/ov.pth" + with open(detector_path, 'r') as f: + config = yaml.safe_load(f) + with open('./training/config/train_config.yaml', 'r') as f: + config2 = yaml.safe_load(f) + config2['data_manner'] = 'lmdb' + config['dataset_json_folder'] = 'preprocessing/dataset_json_v3' + config.update(config2) + dataset = I2GDataset(config=config) + batch_size = 2 + dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True,collate_fn=dataset.collate_fn) + + for i, batch in enumerate(dataloader): + print(f"Batch {i}: {batch}") + continue + diff --git a/training/dataset/__init__.py b/training/dataset/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8d34851aef959f7487e13f3d8d195e650abb0489 --- /dev/null +++ b/training/dataset/__init__.py @@ -0,0 +1,19 @@ +import os +import sys +current_file_path = os.path.abspath(__file__) +parent_dir = os.path.dirname(os.path.dirname(current_file_path)) +project_root_dir = os.path.dirname(parent_dir) +sys.path.append(parent_dir) +sys.path.append(project_root_dir) + + +from .I2G_dataset import I2GDataset +from .iid_dataset import IIDDataset +from .abstract_dataset import DeepfakeAbstractBaseDataset +from .ff_blend import FFBlendDataset +from .fwa_blend import FWABlendDataset +from .lrl_dataset import LRLDataset +from .pair_dataset import pairDataset +from .sbi_dataset import SBIDataset +from .lsda_dataset import LSDADataset +from .tall_dataset import TALLDataset diff --git a/training/dataset/abstract_dataset.py b/training/dataset/abstract_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..52289418a3b6c544c1568ba812dca8fc8546891f --- /dev/null +++ b/training/dataset/abstract_dataset.py @@ -0,0 +1,621 @@ +# author: Zhiyuan Yan +# email: zhiyuanyan@link.cuhk.edu.cn +# date: 2023-03-30 +# description: Abstract Base Class for all types of deepfake datasets. + +import sys + +import lmdb + +sys.path.append('.') + +import os +import math +import yaml +import glob +import json + +import numpy as np +from copy import deepcopy +import cv2 +import random +from PIL import Image +from collections import defaultdict + +import torch +from torch.autograd import Variable +from torch.utils import data +from torchvision import transforms as T + +import albumentations as A + +from .albu import IsotropicResize + +FFpp_pool=['FaceForensics++','FaceShifter','DeepFakeDetection','FF-DF','FF-F2F','FF-FS','FF-NT']# + +def all_in_pool(inputs,pool): + for each in inputs: + if each not in pool: + return False + return True + + +class DeepfakeAbstractBaseDataset(data.Dataset): + """ + Abstract base class for all deepfake datasets. 
+ """ + def __init__(self, config=None, mode='train'): + """Initializes the dataset object. + + Args: + config (dict): A dictionary containing configuration parameters. + mode (str): A string indicating the mode (train or test). + + Raises: + NotImplementedError: If mode is not train or test. + """ + + # Set the configuration and mode + self.config = config + self.mode = mode + self.compression = config['compression'] + self.frame_num = config['frame_num'][mode] + + # Check if 'video_mode' exists in config, otherwise set video_level to False + self.video_level = config.get('video_mode', False) + self.clip_size = config.get('clip_size', None) + self.lmdb = config.get('lmdb', False) + # Dataset dictionary + self.image_list = [] + self.label_list = [] + + # Set the dataset dictionary based on the mode + if mode == 'train': + dataset_list = config['train_dataset'] + # Training data should be collected together for training + image_list, label_list = [], [] + for one_data in dataset_list: + tmp_image, tmp_label, tmp_name = self.collect_img_and_label_for_one_dataset(one_data) + image_list.extend(tmp_image) + label_list.extend(tmp_label) + if self.lmdb: + if len(dataset_list)>1: + if all_in_pool(dataset_list,FFpp_pool): + lmdb_path = os.path.join(config['lmdb_dir'], f"FaceForensics++_lmdb") + self.env = lmdb.open(lmdb_path, create=False, subdir=True, readonly=True, lock=False) + else: + raise ValueError('Training with multiple dataset and lmdb is not implemented yet.') + else: + lmdb_path = os.path.join(config['lmdb_dir'], f"{dataset_list[0] if dataset_list[0] not in FFpp_pool else 'FaceForensics++'}_lmdb") + self.env = lmdb.open(lmdb_path, create=False, subdir=True, readonly=True, lock=False) + elif mode == 'test': + one_data = config['test_dataset'] + # Test dataset should be evaluated separately. So collect only one dataset each time + image_list, label_list, name_list = self.collect_img_and_label_for_one_dataset(one_data) + if self.lmdb: + lmdb_path = os.path.join(config['lmdb_dir'], f"{one_data}_lmdb" if one_data not in FFpp_pool else 'FaceForensics++_lmdb') + self.env = lmdb.open(lmdb_path, create=False, subdir=True, readonly=True, lock=False) + else: + raise NotImplementedError('Only train and test modes are supported.') + + assert len(image_list)!=0 and len(label_list)!=0, f"Collect nothing for {mode} mode!" 
+ self.image_list, self.label_list = image_list, label_list + + + # Create a dictionary containing the image and label lists + self.data_dict = { + 'image': self.image_list, + 'label': self.label_list, + } + + self.transform = self.init_data_aug_method() + + def init_data_aug_method(self): + trans = A.Compose([ + A.HorizontalFlip(p=self.config['data_aug']['flip_prob']), + A.Rotate(limit=self.config['data_aug']['rotate_limit'], p=self.config['data_aug']['rotate_prob']), + A.GaussianBlur(blur_limit=self.config['data_aug']['blur_limit'], p=self.config['data_aug']['blur_prob']), + A.OneOf([ + IsotropicResize(max_side=self.config['resolution'], interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC), + IsotropicResize(max_side=self.config['resolution'], interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_LINEAR), + IsotropicResize(max_side=self.config['resolution'], interpolation_down=cv2.INTER_LINEAR, interpolation_up=cv2.INTER_LINEAR), + ], p = 0 if self.config['with_landmark'] else 1), + A.OneOf([ + A.RandomBrightnessContrast(brightness_limit=self.config['data_aug']['brightness_limit'], contrast_limit=self.config['data_aug']['contrast_limit']), + A.FancyPCA(), + A.HueSaturationValue() + ], p=0.5), + A.ImageCompression(quality_lower=self.config['data_aug']['quality_lower'], quality_upper=self.config['data_aug']['quality_upper'], p=0.5) + ], + keypoint_params=A.KeypointParams(format='xy') if self.config['with_landmark'] else None + ) + return trans + + def rescale_landmarks(self, landmarks, original_size=256, new_size=224): + scale_factor = new_size / original_size + rescaled_landmarks = landmarks * scale_factor + return rescaled_landmarks + + + def collect_img_and_label_for_one_dataset(self, dataset_name: str): + """Collects image and label lists. + + Args: + dataset_name (str): A list containing one dataset information. e.g., 'FF-F2F' + + Returns: + list: A list of image paths. + list: A list of labels. + + Raises: + ValueError: If image paths or labels are not found. + NotImplementedError: If the dataset is not implemented yet. + """ + # Initialize the label and frame path lists + label_list = [] + frame_path_list = [] + + # Record video name for video-level metrics + video_name_list = [] + + # Try to get the dataset information from the JSON file + if not os.path.exists(self.config['dataset_json_folder']): + self.config['dataset_json_folder'] = self.config['dataset_json_folder'].replace('/Youtu_Pangu_Security_Public', '/Youtu_Pangu_Security/public') + try: + with open(os.path.join(self.config['dataset_json_folder'], dataset_name + '.json'), 'r') as f: + dataset_info = json.load(f) + except Exception as e: + print(e) + raise ValueError(f'dataset {dataset_name} not exist!') + + # If JSON file exists, do the following data collection + # FIXME: ugly, need to be modified here. 
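The loop that follows only makes sense with the per-dataset JSON layout in mind: dataset name → label → mode → (compression for the FF++ family) → video name → `{'label': ..., 'frames': [...]}`. A minimal, hypothetical file with that shape, and the way a (frame, label) pair is read out of it, looks like this; the label keys, paths and video names are invented for illustration.

```python
# Hypothetical miniature of a <dataset_name>.json file under config['dataset_json_folder']
dataset_info = {
    "FF-DF": {
        "fake": {                        # one entry per label
            "train": {
                "c23": {                 # compression level (FF++-style sets only)
                    "000_003": {
                        "label": "fake",
                        "frames": ["FF-DF/frames/000_003/000.png",
                                   "FF-DF/frames/000_003/008.png"],
                    },
                },
            },
        },
        "real": {
            "train": {
                "c23": {
                    "000": {
                        "label": "real",
                        "frames": ["FF-DF/frames/000/000.png"],
                    },
                },
            },
        },
    },
}
label_dict = {"real": 0, "fake": 1}      # plays the role of config['label_dict']

# Same traversal order as the collection loop below.
for label_name, per_mode in dataset_info["FF-DF"].items():
    for video_name, video_info in per_mode["train"]["c23"].items():
        label = label_dict[video_info["label"]]
        print(f"{video_info['label']}_{video_name}", label, len(video_info["frames"]))
```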
+ cp = None + if dataset_name == 'FaceForensics++_c40': + dataset_name = 'FaceForensics++' + cp = 'c40' + elif dataset_name == 'FF-DF_c40': + dataset_name = 'FF-DF' + cp = 'c40' + elif dataset_name == 'FF-F2F_c40': + dataset_name = 'FF-F2F' + cp = 'c40' + elif dataset_name == 'FF-FS_c40': + dataset_name = 'FF-FS' + cp = 'c40' + elif dataset_name == 'FF-NT_c40': + dataset_name = 'FF-NT' + cp = 'c40' + # Get the information for the current dataset + for label in dataset_info[dataset_name]: + sub_dataset_info = dataset_info[dataset_name][label][self.mode] + # Special case for FaceForensics++ and DeepFakeDetection, choose the compression type + if cp == None and dataset_name in ['FF-DF', 'FF-F2F', 'FF-FS', 'FF-NT', 'FaceForensics++','DeepFakeDetection','FaceShifter']: + sub_dataset_info = sub_dataset_info[self.compression] + elif cp == 'c40' and dataset_name in ['FF-DF', 'FF-F2F', 'FF-FS', 'FF-NT', 'FaceForensics++','DeepFakeDetection','FaceShifter']: + sub_dataset_info = sub_dataset_info['c40'] + + # Iterate over the videos in the dataset + for video_name, video_info in sub_dataset_info.items(): + # Unique video name + unique_video_name = video_info['label'] + '_' + video_name + + # Get the label and frame paths for the current video + if video_info['label'] not in self.config['label_dict']: + raise ValueError(f'Label {video_info["label"]} is not found in the configuration file.') + label = self.config['label_dict'][video_info['label']] + frame_paths = video_info['frames'] + # sorted video path to the lists + if '\\' in frame_paths[0]: + frame_paths = sorted(frame_paths, key=lambda x: int(x.split('\\')[-1].split('.')[0])) + else: + frame_paths = sorted(frame_paths, key=lambda x: int(x.split('/')[-1].split('.')[0])) + + # Consider the case when the actual number of frames (e.g., 270) is larger than the specified (i.e., self.frame_num=32) + # In this case, we select self.frame_num frames from the original 270 frames + total_frames = len(frame_paths) + if self.frame_num < total_frames: + total_frames = self.frame_num + if self.video_level: + # Select clip_size continuous frames + start_frame = random.randint(0, total_frames - self.frame_num) if self.mode == 'train' else 0 + frame_paths = frame_paths[start_frame:start_frame + self.frame_num] # update total_frames + else: + # Select self.frame_num frames evenly distributed throughout the video + step = total_frames // self.frame_num + frame_paths = [frame_paths[i] for i in range(0, total_frames, step)][:self.frame_num] + + # If video-level methods, crop clips from the selected frames if needed + if self.video_level: + if self.clip_size is None: + raise ValueError('clip_size must be specified when video_level is True.') + # Check if the number of total frames is greater than or equal to clip_size + if total_frames >= self.clip_size: + # Initialize an empty list to store the selected continuous frames + selected_clips = [] + + # Calculate the number of clips to select + num_clips = total_frames // self.clip_size + + if num_clips > 1: + # Calculate the step size between each clip + clip_step = (total_frames - self.clip_size) // (num_clips - 1) + + # Select clip_size continuous frames from each part of the video + for i in range(num_clips): + # Ensure start_frame + self.clip_size - 1 does not exceed the index of the last frame + start_frame = random.randrange(i * clip_step, min((i + 1) * clip_step, total_frames - self.clip_size + 1)) if self.mode == 'train' else i * clip_step + continuous_frames = frame_paths[start_frame:start_frame + self.clip_size] 
+ assert len(continuous_frames) == self.clip_size, 'clip_size is not equal to the length of frame_path_list' + selected_clips.append(continuous_frames) + + else: + start_frame = random.randrange(0, total_frames - self.clip_size + 1) if self.mode == 'train' else 0 + continuous_frames = frame_paths[start_frame:start_frame + self.clip_size] + assert len(continuous_frames)==self.clip_size, 'clip_size is not equal to the length of frame_path_list' + selected_clips.append(continuous_frames) + + # Append the list of selected clips and append the label + label_list.extend([label] * len(selected_clips)) + frame_path_list.extend(selected_clips) + # video name save + video_name_list.extend([unique_video_name] * len(selected_clips)) + + else: + print(f"Skipping video {unique_video_name} because it has less than clip_size ({self.clip_size}) frames ({total_frames}).") + + # Otherwise, extend the label and frame paths to the lists according to the number of frames + else: + # Extend the label and frame paths to the lists according to the number of frames + label_list.extend([label] * total_frames) + frame_path_list.extend(frame_paths) + # video name save + video_name_list.extend([unique_video_name] * len(frame_paths)) + + # Shuffle the label and frame path lists in the same order + shuffled = list(zip(label_list, frame_path_list, video_name_list)) + random.shuffle(shuffled) + label_list, frame_path_list, video_name_list = zip(*shuffled) + + return frame_path_list, label_list, video_name_list + + + def load_rgb(self, file_path): + """ + Load an RGB image from a file path and resize it to a specified resolution. + + Args: + file_path: A string indicating the path to the image file. + + Returns: + An Image object containing the loaded and resized image. + + Raises: + ValueError: If the loaded image is None. + """ + size = self.config['resolution'] # if self.mode == "train" else self.config['resolution'] + if not self.lmdb: + if not file_path[0] == '.': + file_path = f'{self.config["rgb_dir"]}'+file_path + assert os.path.exists(file_path), f"{file_path} does not exist" + img = cv2.imread(file_path) + if img is None: + raise ValueError('Loaded image is None: {}'.format(file_path)) + elif self.lmdb: + with self.env.begin(write=False) as txn: + # transfer the path format from rgb-path to lmdb-key + if file_path[0]=='.': + file_path=file_path.replace('./datasets\\','') + + image_bin = txn.get(file_path.encode()) + image_buf = np.frombuffer(image_bin, dtype=np.uint8) + img = cv2.imdecode(image_buf, cv2.IMREAD_COLOR) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = cv2.resize(img, (size, size), interpolation=cv2.INTER_CUBIC) + return Image.fromarray(np.array(img, dtype=np.uint8)) + + + def load_mask(self, file_path): + """ + Load a binary mask image from a file path and resize it to a specified resolution. + + Args: + file_path: A string indicating the path to the mask file. + + Returns: + A numpy array containing the loaded and resized mask. + + Raises: + None. 
+ """ + size = self.config['resolution'] + if file_path is None: + return np.zeros((size, size, 1)) + if not self.lmdb: + if not file_path[0] == '.': + file_path = f'./{self.config["rgb_dir"]}\\'+file_path + if os.path.exists(file_path): + mask = cv2.imread(file_path, 0) + if mask is None: + mask = np.zeros((size, size)) + else: + return np.zeros((size, size, 1)) + else: + with self.env.begin(write=False) as txn: + # transfer the path format from rgb-path to lmdb-key + if file_path[0]=='.': + file_path=file_path.replace('./datasets\\','') + + image_bin = txn.get(file_path.encode()) + if image_bin is None: + mask = np.zeros((size, size,3)) + else: + image_buf = np.frombuffer(image_bin, dtype=np.uint8) + # cv2.IMREAD_GRAYSCALE为灰度图,cv2.IMREAD_COLOR为彩色图 + mask = cv2.imdecode(image_buf, cv2.IMREAD_COLOR) + mask = cv2.resize(mask, (size, size)) / 255 + mask = np.expand_dims(mask, axis=2) + return np.float32(mask) + + def load_landmark(self, file_path): + """ + Load 2D facial landmarks from a file path. + + Args: + file_path: A string indicating the path to the landmark file. + + Returns: + A numpy array containing the loaded landmarks. + + Raises: + None. + """ + if file_path is None: + return np.zeros((81, 2)) + if not self.lmdb: + if not file_path[0] == '.': + file_path = f'./{self.config["rgb_dir"]}\\'+file_path + if os.path.exists(file_path): + landmark = np.load(file_path) + else: + return np.zeros((81, 2)) + else: + with self.env.begin(write=False) as txn: + # transfer the path format from rgb-path to lmdb-key + if file_path[0]=='.': + file_path=file_path.replace('./datasets\\','') + binary = txn.get(file_path.encode()) + landmark = np.frombuffer(binary, dtype=np.uint32).reshape((81, 2)) + landmark=self.rescale_landmarks(np.float32(landmark), original_size=256, new_size=self.config['resolution']) + return landmark + + def to_tensor(self, img): + """ + Convert an image to a PyTorch tensor. + """ + return T.ToTensor()(img) + + def normalize(self, img): + """ + Normalize an image. + """ + mean = self.config['mean'] + std = self.config['std'] + normalize = T.Normalize(mean=mean, std=std) + return normalize(img) + + def data_aug(self, img, landmark=None, mask=None, augmentation_seed=None): + """ + Apply data augmentation to an image, landmark, and mask. + + Args: + img: An Image object containing the image to be augmented. + landmark: A numpy array containing the 2D facial landmarks to be augmented. + mask: A numpy array containing the binary mask to be augmented. + + Returns: + The augmented image, landmark, and mask. 
+ """ + + # Set the seed for the random number generator + if augmentation_seed is not None: + random.seed(augmentation_seed) + np.random.seed(augmentation_seed) + + # Create a dictionary of arguments + kwargs = {'image': img} + + # Check if the landmark and mask are not None + if landmark is not None: + kwargs['keypoints'] = landmark + kwargs['keypoint_params'] = A.KeypointParams(format='xy') + if mask is not None: + mask = mask.squeeze(2) + if mask.max() > 0: + kwargs['mask'] = mask + + # Apply data augmentation + transformed = self.transform(**kwargs) + + # Get the augmented image, landmark, and mask + augmented_img = transformed['image'] + augmented_landmark = transformed.get('keypoints') + augmented_mask = transformed.get('mask',mask) + + # Convert the augmented landmark to a numpy array + if augmented_landmark is not None: + augmented_landmark = np.array(augmented_landmark) + + # Reset the seeds to ensure different transformations for different videos + if augmentation_seed is not None: + random.seed() + np.random.seed() + + return augmented_img, augmented_landmark, augmented_mask + + def __getitem__(self, index, no_norm=False): + """ + Returns the data point at the given index. + + Args: + index (int): The index of the data point. + + Returns: + A tuple containing the image tensor, the label tensor, the landmark tensor, + and the mask tensor. + """ + # Get the image paths and label + image_paths = self.data_dict['image'][index] + label = self.data_dict['label'][index] + + if not isinstance(image_paths, list): + image_paths = [image_paths] # for the image-level IO, only one frame is used + + image_tensors = [] + landmark_tensors = [] + mask_tensors = [] + augmentation_seed = None + + for image_path in image_paths: + # Initialize a new seed for data augmentation at the start of each video + if self.video_level and image_path == image_paths[0]: + augmentation_seed = random.randint(0, 2**32 - 1) + + # Get the mask and landmark paths + mask_path = image_path.replace('frames', 'masks') # Use .png for mask + landmark_path = image_path.replace('frames', 'landmarks').replace('.png', '.npy') # Use .npy for landmark + + # Load the image + try: + image = self.load_rgb(image_path) + except Exception as e: + # Skip this image and return the first one + print(f"Error loading image at index {index}: {e}") + return self.__getitem__(0) + image = np.array(image) # Convert to numpy array for data augmentation + + # Load mask and landmark (if needed) + if self.config['with_mask']: + mask = self.load_mask(mask_path) + else: + mask = None + if self.config['with_landmark']: + landmarks = self.load_landmark(landmark_path) + else: + landmarks = None + + # Do Data Augmentation + if self.mode == 'train' and self.config['use_data_augmentation']: + image_trans, landmarks_trans, mask_trans = self.data_aug(image, landmarks, mask, augmentation_seed) + else: + image_trans, landmarks_trans, mask_trans = deepcopy(image), deepcopy(landmarks), deepcopy(mask) + + + # To tensor and normalize + if not no_norm: + image_trans = self.normalize(self.to_tensor(image_trans)) + if self.config['with_landmark']: + landmarks_trans = torch.from_numpy(landmarks) + if self.config['with_mask']: + mask_trans = torch.from_numpy(mask_trans) + + image_tensors.append(image_trans) + landmark_tensors.append(landmarks_trans) + mask_tensors.append(mask_trans) + + if self.video_level: + # Stack image tensors along a new dimension (time) + image_tensors = torch.stack(image_tensors, dim=0) + # Stack landmark and mask tensors along a new dimension 
(time) + if not any(landmark is None or (isinstance(landmark, list) and None in landmark) for landmark in landmark_tensors): + landmark_tensors = torch.stack(landmark_tensors, dim=0) + if not any(m is None or (isinstance(m, list) and None in m) for m in mask_tensors): + mask_tensors = torch.stack(mask_tensors, dim=0) + else: + # Get the first image tensor + image_tensors = image_tensors[0] + # Get the first landmark and mask tensors + if not any(landmark is None or (isinstance(landmark, list) and None in landmark) for landmark in landmark_tensors): + landmark_tensors = landmark_tensors[0] + if not any(m is None or (isinstance(m, list) and None in m) for m in mask_tensors): + mask_tensors = mask_tensors[0] + + return image_tensors, label, landmark_tensors, mask_tensors + + @staticmethod + def collate_fn(batch): + """ + Collate a batch of data points. + + Args: + batch (list): A list of tuples containing the image tensor, the label tensor, + the landmark tensor, and the mask tensor. + + Returns: + A tuple containing the image tensor, the label tensor, the landmark tensor, + and the mask tensor. + """ + # Separate the image, label, landmark, and mask tensors + images, labels, landmarks, masks = zip(*batch) + + # Stack the image, label, landmark, and mask tensors + images = torch.stack(images, dim=0) + labels = torch.LongTensor(labels) + + # Special case for landmarks and masks if they are None + if not any(landmark is None or (isinstance(landmark, list) and None in landmark) for landmark in landmarks): + landmarks = torch.stack(landmarks, dim=0) + else: + landmarks = None + + if not any(m is None or (isinstance(m, list) and None in m) for m in masks): + masks = torch.stack(masks, dim=0) + else: + masks = None + + # Create a dictionary of the tensors + data_dict = {} + data_dict['image'] = images + data_dict['label'] = labels + data_dict['landmark'] = landmarks + data_dict['mask'] = masks + return data_dict + + def __len__(self): + """ + Return the length of the dataset. + + Args: + None. + + Returns: + An integer indicating the length of the dataset. + + Raises: + AssertionError: If the number of images and labels in the dataset are not equal. + """ + assert len(self.image_list) == len(self.label_list), 'Number of images and labels are not equal' + return len(self.image_list) + + +if __name__ == "__main__": + with open('/data/home/zhiyuanyan/DeepfakeBench/training/config/detector/video_baseline.yaml', 'r') as f: + config = yaml.safe_load(f) + train_set = DeepfakeAbstractBaseDataset( + config = config, + mode = 'train', + ) + train_data_loader = \ + torch.utils.data.DataLoader( + dataset=train_set, + batch_size=config['train_batchSize'], + shuffle=True, + num_workers=0, + collate_fn=train_set.collate_fn, + ) + from tqdm import tqdm + for iteration, batch in enumerate(tqdm(train_data_loader)): + # print(iteration) + ... 
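As a quick sanity check, `collate_fn` can also be exercised without any dataset on disk: hand it a couple of dummy frame-level samples and inspect the dictionary it returns. The 256×256 resolution and the missing landmarks/masks below are assumptions for the example only; the class is the one defined in this file.

```python
import torch

# Two dummy frame-level samples in __getitem__ order: (image, label, landmark, mask)
dummy_batch = [
    (torch.zeros(3, 256, 256), 0, None, None),
    (torch.zeros(3, 256, 256), 1, None, None),
]
data = DeepfakeAbstractBaseDataset.collate_fn(dummy_batch)
assert data['image'].shape == (2, 3, 256, 256)
assert data['label'].tolist() == [0, 1]
assert data['landmark'] is None and data['mask'] is None
```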
+ # if iteration > 10: + # break diff --git a/training/dataset/albu.py b/training/dataset/albu.py new file mode 100644 index 0000000000000000000000000000000000000000..2bd8f3aaa5a91c83893180601094505f672769f4 --- /dev/null +++ b/training/dataset/albu.py @@ -0,0 +1,99 @@ +import random + +import cv2 +import numpy as np +from albumentations import DualTransform, ImageOnlyTransform +from albumentations.augmentations.crops.functional import crop + + +def isotropically_resize_image(img, size, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC): + h, w = img.shape[:2] + if max(w, h) == size: + return img + if w > h: + scale = size / w + h = h * scale + w = size + else: + scale = size / h + w = w * scale + h = size + interpolation = interpolation_up if scale > 1 else interpolation_down + resized = cv2.resize(img, (int(w), int(h)), interpolation=interpolation) + return resized + + +class IsotropicResize(DualTransform): + def __init__(self, max_side, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC, + always_apply=False, p=1): + super(IsotropicResize, self).__init__(always_apply, p) + self.max_side = max_side + self.interpolation_down = interpolation_down + self.interpolation_up = interpolation_up + + def apply(self, img, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC, **params): + return isotropically_resize_image(img, size=self.max_side, interpolation_down=interpolation_down, + interpolation_up=interpolation_up) + + def apply_to_mask(self, img, **params): + return self.apply(img, interpolation_down=cv2.INTER_NEAREST, interpolation_up=cv2.INTER_NEAREST, **params) + + def get_transform_init_args_names(self): + return ("max_side", "interpolation_down", "interpolation_up") + + +class Resize4xAndBack(ImageOnlyTransform): + def __init__(self, always_apply=False, p=0.5): + super(Resize4xAndBack, self).__init__(always_apply, p) + + def apply(self, img, **params): + h, w = img.shape[:2] + scale = random.choice([2, 4]) + img = cv2.resize(img, (w // scale, h // scale), interpolation=cv2.INTER_AREA) + img = cv2.resize(img, (w, h), + interpolation=random.choice([cv2.INTER_CUBIC, cv2.INTER_LINEAR, cv2.INTER_NEAREST])) + return img + + +class RandomSizedCropNonEmptyMaskIfExists(DualTransform): + + def __init__(self, min_max_height, w2h_ratio=[0.7, 1.3], always_apply=False, p=0.5): + super(RandomSizedCropNonEmptyMaskIfExists, self).__init__(always_apply, p) + + self.min_max_height = min_max_height + self.w2h_ratio = w2h_ratio + + def apply(self, img, x_min=0, x_max=0, y_min=0, y_max=0, **params): + cropped = crop(img, x_min, y_min, x_max, y_max) + return cropped + + @property + def targets_as_params(self): + return ["mask"] + + def get_params_dependent_on_targets(self, params): + mask = params["mask"] + mask_height, mask_width = mask.shape[:2] + crop_height = int(mask_height * random.uniform(self.min_max_height[0], self.min_max_height[1])) + w2h_ratio = random.uniform(*self.w2h_ratio) + crop_width = min(int(crop_height * w2h_ratio), mask_width - 1) + if mask.sum() == 0: + x_min = random.randint(0, mask_width - crop_width + 1) + y_min = random.randint(0, mask_height - crop_height + 1) + else: + mask = mask.sum(axis=-1) if mask.ndim == 3 else mask + non_zero_yx = np.argwhere(mask) + y, x = random.choice(non_zero_yx) + x_min = x - random.randint(0, crop_width - 1) + y_min = y - random.randint(0, crop_height - 1) + x_min = np.clip(x_min, 0, mask_width - crop_width) + y_min = np.clip(y_min, 0, mask_height - crop_height) + + x_max = x_min + crop_height + y_max = 
y_min + crop_width + y_max = min(mask_height, y_max) + x_max = min(mask_width, x_max) + return {"x_min": x_min, "x_max": x_max, "y_min": y_min, "y_max": y_max} + + def get_transform_init_args_names(self): + return "min_max_height", "height", "width", "w2h_ratio" \ No newline at end of file diff --git a/training/dataset/face_utils.py b/training/dataset/face_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..27ae0f48f93b1453950ffdcdef54b909c5c0314f --- /dev/null +++ b/training/dataset/face_utils.py @@ -0,0 +1,238 @@ +import cv2 +import numpy as np +from skimage import transform as trans +# from mtcnn.mtcnn import MTCNN + + +def get_keypts(face): + # get key points from the results of mtcnn + + if len(face['keypoints']) == 0: + return [] + + leye = np.array(face['keypoints']['left_eye'], dtype=np.int).reshape(-1, 2) + reye = np.array(face['keypoints']['right_eye'], + dtype=np.int).reshape(-1, 2) + nose = np.array(face['keypoints']['nose'], dtype=np.int).reshape(-1, 2) + lmouth = np.array(face['keypoints']['mouth_left'], + dtype=np.int).reshape(-1, 2) + rmouth = np.array(face['keypoints']['mouth_right'], + dtype=np.int).reshape(-1, 2) + + pts = np.concatenate([leye, reye, nose, lmouth, rmouth], axis=0) + + return pts + + +def img_align_crop(img, landmark=None, outsize=None, scale=1.3, mask=None): + """ align and crop the face according to the given bbox and landmarks + landmark: 5 key points + """ + + M = None + + target_size = [112, 112] + + dst = np.array([ + [30.2946, 51.6963], + [65.5318, 51.5014], + [48.0252, 71.7366], + [33.5493, 92.3655], + [62.7299, 92.2041]], dtype=np.float32) + + if target_size[1] == 112: + dst[:, 0] += 8.0 + + dst[:, 0] = dst[:, 0] * outsize[0] / target_size[0] + dst[:, 1] = dst[:, 1] * outsize[1] / target_size[1] + + target_size = outsize + + margin_rate = scale - 1 + x_margin = target_size[0] * margin_rate / 2. + y_margin = target_size[1] * margin_rate / 2. + + # move + dst[:, 0] += x_margin + dst[:, 1] += y_margin + + # resize + dst[:, 0] *= target_size[0] / (target_size[0] + 2 * x_margin) + dst[:, 1] *= target_size[1] / (target_size[1] + 2 * y_margin) + + src = landmark.astype(np.float32) + + # use skimage tranformation + tform = trans.SimilarityTransform() + tform.estimate(src, dst) + M = tform.params[0:2, :] + + # M: use opencv + # M = cv2.getAffineTransform(src[[0,1,2],:],dst[[0,1,2],:]) + + img = cv2.warpAffine(img, M, (target_size[1], target_size[0])) + + if outsize is not None: + img = cv2.resize(img, (outsize[1], outsize[0])) + + if mask is not None: + mask = cv2.warpAffine(mask, M, (target_size[1], target_size[0])) + mask = cv2.resize(mask, (outsize[1], outsize[0])) + return img, mask + else: + return img + + + + + +def expand_bbox(bbox, width, height, scale=1.3, minsize=None): + """ + Expand original boundingbox by scale. 
+ :param bbx: original boundingbox + :param width: frame width + :param height: frame height + :param scale: bounding box size multiplier to get a bigger face region + :param minsize: set minimum bounding box size + :return: expanded bbox + """ + x, y, w, h = bbox + + # box center + cx = int(x + w / 2) + cy = int(y + h / 2) + + # expand by scale factor + new_size = max(int(w * scale), int(h * scale)) + new_x = max(0, int(cx - new_size / 2)) + new_y = max(0, int(cy - new_size / 2)) + + # Check for too big bbox for given x, y + new_size = min(width - new_x, new_size) + new_size = min(height - new_size, new_size) + + return new_x, new_y, new_size, new_size + + +def extract_face_MTCNN(face_detector, image, expand_scale=1.3, res=256): + # Image size + height, width = image.shape[:2] + + # Convert to rgb + rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + + # Detect with dlib + faces = face_detector.detect_faces(rgb) + if len(faces): + # For now only take biggest face + face = None + bbox = None + max_region = 0 + for ff in faces: + if max_region == 0: + face = ff + bbox = face['box'] + max_region = bbox[2]*bbox[3] + else: + bb = ff['box'] + region = bb[2]*bb[3] + if region > max_rigion: + max_rigion = region + face = ff + bbox = face['box'] + print(max_region) + #face = faces[0] + + #bbox = face['box'] + + # --- Prediction --------------------------------------------------- + # Face crop with MTCNN and bounding box scale enlargement + x, y, w, h = expand_bbox(bbox, width, height, scale=expand_scale) + cropped_face = rgb[y:y+h, x:x+w] + + cropped_face = cv2.resize( + cropped_face, (res, res), interpolation=cv2.INTER_CUBIC) + cropped_face = cv2.cvtColor(cropped_face, cv2.COLOR_RGB2BGR) + return cropped_face + + return None + + +def extract_aligned_face_MTCNN(face_detector, image, expand_scale=1.3, res=256, mask=None): + # Image size + height, width = image.shape[:2] + + # Convert to rgb + rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + + # Detect with dlib + faces = face_detector.detect_faces(rgb) + if len(faces): + # For now only take biggest face + face = None + bbox = None + max_region = 0 + for i, ff in enumerate(faces): + if max_region == 0: + face = ff + bbox = face['box'] + max_region = bbox[2]*bbox[3] + else: + bb = ff['box'] + region = bb[2]*bb[3] + if region > max_region: + max_region = region + face = ff + bbox = face['box'] + #print('face {}: {}'.format(i, max_region)) + #face = faces[0] + + landmarks = get_keypts(face) + + # --- Prediction --------------------------------------------------- + # Face aligned crop with MTCNN and bounding box scale enlargement + if mask is not None: + cropped_face, cropped_mask = img_align_crop(rgb, landmarks, outsize=[ + res, res], scale=expand_scale, mask=mask) + cropped_face = cv2.cvtColor(cropped_face, cv2.COLOR_RGB2BGR) + cropped_mask = cv2.cvtColor(cropped_mask, cv2.COLOR_RGB2GRAY) + return cropped_face, cropped_mask + else: + cropped_face = img_align_crop(rgb, landmarks, outsize=[ + res, res], scale=expand_scale) + cropped_face = cv2.cvtColor(cropped_face, cv2.COLOR_RGB2BGR) + return cropped_face + + return None + + +def extract_face_DLIB(face_detector, image, expand_scale=1.3, res=256): + # Image size + height, width = image.shape[:2] + + # Convert to gray + gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) + + # Detect with dlib + faces = face_detector(gray, 1) + if len(faces): + # For now only take biggest face + face = faces[0] + + x1 = face.left() + y1 = face.top() + x2 = face.right() + y2 = face.bottom() + bbox = (x1, y1, x2-x1, y2-y1) + + # --- 
Prediction --------------------------------------------------- + # Face crop with dlib and bounding box scale enlargement + x, y, w, h = expand_bbox(bbox, width, height, scale=expand_scale) + cropped_face = image[y:y+h, x:x+w] + + cropped_face = cv2.resize( + cropped_face, (res, res), interpolation=cv2.INTER_CUBIC) + + return cropped_face + + return None diff --git a/training/dataset/ff_blend.py b/training/dataset/ff_blend.py new file mode 100644 index 0000000000000000000000000000000000000000..d10c5303f735abe396931d93b7b5a02d595fe5aa --- /dev/null +++ b/training/dataset/ff_blend.py @@ -0,0 +1,572 @@ +''' +# author: Zhiyuan Yan +# email: zhiyuanyan@link.cuhk.edu.cn +# date: 2023-03-30 + +The code is designed for Face X-ray. +''' + +import os +import sys +import json +import pickle +import time + +import lmdb +import numpy as np +import albumentations as A +import cv2 +import random +from PIL import Image +from skimage.util import random_noise +from scipy import linalg +import heapq as hq +import lmdb +import torch +from torch.autograd import Variable +from torch.utils import data +from torchvision import transforms as T +import torchvision + +from dataset.utils.face_blend import * +from dataset.utils.face_align import get_align_mat_new +from dataset.utils.color_transfer import color_transfer +from dataset.utils.faceswap_utils import blendImages as alpha_blend_fea +from dataset.utils.faceswap_utils import AlphaBlend as alpha_blend +from dataset.utils.face_aug import aug_one_im, change_res +from dataset.utils.image_ae import get_pretraiend_ae +from dataset.utils.warp import warp_mask +from dataset.utils import faceswap +from scipy.ndimage.filters import gaussian_filter + + +class RandomDownScale(A.core.transforms_interface.ImageOnlyTransform): + def apply(self,img,**params): + return self.randomdownscale(img) + + def randomdownscale(self,img): + keep_ratio=True + keep_input_shape=True + H,W,C=img.shape + ratio_list=[2,4] + r=ratio_list[np.random.randint(len(ratio_list))] + img_ds=cv2.resize(img,(int(W/r),int(H/r)),interpolation=cv2.INTER_NEAREST) + if keep_input_shape: + img_ds=cv2.resize(img_ds,(W,H),interpolation=cv2.INTER_LINEAR) + return img_ds + + +class FFBlendDataset(data.Dataset): + def __init__(self, config=None): + + self.lmdb = config.get('lmdb', False) + if self.lmdb: + lmdb_path = os.path.join(config['lmdb_dir'], f"FaceForensics++_lmdb") + self.env = lmdb.open(lmdb_path, create=False, subdir=True, readonly=True, lock=False) + + # Check if the dictionary has already been created + if os.path.exists('training/lib/nearest_face_info.pkl'): + with open('training/lib/nearest_face_info.pkl', 'rb') as f: + face_info = pickle.load(f) + else: + raise ValueError(f"Need to run the dataset/generate_xray_nearest.py before training the face xray.") + self.face_info = face_info + # Check if the dictionary has already been created + if os.path.exists('training/lib/landmark_dict_ffall.pkl'): + with open('training/lib/landmark_dict_ffall.pkl', 'rb') as f: + landmark_dict = pickle.load(f) + self.landmark_dict = landmark_dict + self.imid_list = self.get_training_imglist() + self.transforms = T.Compose([ + # T.GaussianBlur(kernel_size=3, sigma=(0.1, 2.0)), + # T.ColorJitter(hue=.05, saturation=.05), + # T.RandomHorizontalFlip(), + # T.RandomRotation(20, resample=Image.BILINEAR), + T.ToTensor(), + T.Normalize(mean=[0.5, 0.5, 0.5], + std=[0.5, 0.5, 0.5]) + ]) + self.data_dict = { + 'imid_list': self.imid_list + } + self.config=config + # def data_aug(self, im): + # """ + # Apply data augmentation on the 
input image. + # """ + # transform = T.Compose([ + # T.ToPILImage(), + # T.GaussianBlur(kernel_size=3, sigma=(0.1, 2.0)), + # T.ColorJitter(hue=.05, saturation=.05), + # ]) + # # Apply transformations + # im_aug = transform(im) + # return im_aug + + def blended_aug(self, im): + transform = A.Compose([ + A.RGBShift((-20,20),(-20,20),(-20,20),p=0.3), + A.HueSaturationValue(hue_shift_limit=(-0.3,0.3), sat_shift_limit=(-0.3,0.3), val_shift_limit=(-0.3,0.3), p=0.3), + A.RandomBrightnessContrast(brightness_limit=(-0.3,0.3), contrast_limit=(-0.3,0.3), p=0.3), + A.ImageCompression(quality_lower=40, quality_upper=100,p=0.5) + ]) + # Apply transformations + im_aug = transform(image=im) + return im_aug['image'] + + + def data_aug(self, im): + """ + Apply data augmentation on the input image using albumentations. + """ + transform = A.Compose([ + A.Compose([ + A.RGBShift((-20,20),(-20,20),(-20,20),p=0.3), + A.HueSaturationValue(hue_shift_limit=(-0.3,0.3), sat_shift_limit=(-0.3,0.3), val_shift_limit=(-0.3,0.3), p=1), + A.RandomBrightnessContrast(brightness_limit=(-0.1,0.1), contrast_limit=(-0.1,0.1), p=1), + ],p=1), + A.OneOf([ + RandomDownScale(p=1), + A.Sharpen(alpha=(0.2, 0.5), lightness=(0.5, 1.0), p=1), + ],p=1), + ], p=1.) + # Apply transformations + im_aug = transform(image=im) + return im_aug['image'] + + + def get_training_imglist(self): + """ + Get the list of training images. + """ + random.seed(1024) # Fix the random seed for reproducibility + imid_list = list(self.landmark_dict.keys()) + # imid_list = [imid.replace('landmarks', 'frames').replace('npy', 'png') for imid in imid_list] + random.shuffle(imid_list) + return imid_list + + def load_rgb(self, file_path): + """ + Load an RGB image from a file path and resize it to a specified resolution. + + Args: + file_path: A string indicating the path to the image file. + + Returns: + An Image object containing the loaded and resized image. + + Raises: + ValueError: If the loaded image is None. + """ + size = self.config['resolution'] # if self.mode == "train" else self.config['resolution'] + if not self.lmdb: + if not file_path[0] == '.': + file_path = f'./{self.config["rgb_dir"]}\\'+file_path + assert os.path.exists(file_path), f"{file_path} does not exist" + img = cv2.imread(file_path) + if img is None: + raise ValueError('Loaded image is None: {}'.format(file_path)) + elif self.lmdb: + with self.env.begin(write=False) as txn: + # transfer the path format from rgb-path to lmdb-key + if file_path[0]=='.': + file_path=file_path.replace('./datasets\\','') + + image_bin = txn.get(file_path.encode()) + image_buf = np.frombuffer(image_bin, dtype=np.uint8) + img = cv2.imdecode(image_buf, cv2.IMREAD_COLOR) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = cv2.resize(img, (size, size), interpolation=cv2.INTER_CUBIC) + return np.array(img, dtype=np.uint8) + + + def load_mask(self, file_path): + """ + Load a binary mask image from a file path and resize it to a specified resolution. + + Args: + file_path: A string indicating the path to the mask file. + + Returns: + A numpy array containing the loaded and resized mask. + + Raises: + None. 
+ """ + size = self.config['resolution'] + if file_path is None: + if not file_path[0] == '.': + file_path = f'./{self.config["rgb_dir"]}\\'+file_path + return np.zeros((size, size, 1)) + if not self.lmdb: + if os.path.exists(file_path): + mask = cv2.imread(file_path, 0) + if mask is None: + mask = np.zeros((size, size)) + else: + return np.zeros((size, size, 1)) + else: + with self.env.begin(write=False) as txn: + # transfer the path format from rgb-path to lmdb-key + if file_path[0]=='.': + file_path=file_path.replace('./datasets\\','') + image_bin = txn.get(file_path.encode()) + image_buf = np.frombuffer(image_bin, dtype=np.uint8) + # cv2.IMREAD_GRAYSCALE为灰度图,cv2.IMREAD_COLOR为彩色图 + mask = cv2.imdecode(image_buf, cv2.IMREAD_COLOR) + mask = cv2.resize(mask, (size, size)) / 255 + mask = np.expand_dims(mask, axis=2) + return np.float32(mask) + + def load_landmark(self, file_path): + """ + Load 2D facial landmarks from a file path. + + Args: + file_path: A string indicating the path to the landmark file. + + Returns: + A numpy array containing the loaded landmarks. + + Raises: + None. + """ + if file_path is None: + return np.zeros((81, 2)) + if not self.lmdb: + if not file_path[0] == '.': + file_path = f'./{self.config["rgb_dir"]}\\'+file_path + if os.path.exists(file_path): + landmark = np.load(file_path) + else: + return np.zeros((81, 2)) + else: + with self.env.begin(write=False) as txn: + # transfer the path format from rgb-path to lmdb-key + if file_path[0]=='.': + file_path=file_path.replace('./datasets\\','') + binary = txn.get(file_path.encode()) + landmark = np.frombuffer(binary, dtype=np.uint32).reshape((81, 2)) + return np.float32(landmark) + + def preprocess_images(self, imid_fg, imid_bg): + """ + Load foreground and background images and face shapes. + """ + fg_im = self.load_rgb(imid_fg.replace('landmarks', 'frames').replace('npy', 'png')) + fg_im = np.array(self.data_aug(fg_im)) + fg_shape = self.landmark_dict[imid_fg] + fg_shape = np.array(fg_shape, dtype=np.int32) + + bg_im = self.load_rgb(imid_bg.replace('landmarks', 'frames').replace('npy', 'png')) + bg_im = np.array(self.data_aug(bg_im)) + bg_shape = self.landmark_dict[imid_bg] + bg_shape = np.array(bg_shape, dtype=np.int32) + + if fg_im is None: + return bg_im, bg_shape, bg_im, bg_shape + elif bg_im is None: + return fg_im, fg_shape, fg_im, fg_shape + + return fg_im, fg_shape, bg_im, bg_shape + + + def get_fg_bg(self, one_lmk_path): + """ + Get foreground and background paths + """ + bg_lmk_path = one_lmk_path + # Randomly pick one from the nearest neighbors for the foreground + if bg_lmk_path in self.face_info: + fg_lmk_path = random.choice(self.face_info[bg_lmk_path]) + else: + fg_lmk_path = bg_lmk_path + + return fg_lmk_path, bg_lmk_path + + + def generate_masks(self, fg_im, fg_shape, bg_im, bg_shape): + """ + Generate masks for foreground and background images. + """ + fg_mask = get_mask(fg_shape, fg_im, deform=False) + bg_mask = get_mask(bg_shape, bg_im, deform=True) + + # # Only do the postprocess for the background mask + bg_mask_postprocess = warp_mask(bg_mask, std=20) + return fg_mask, bg_mask_postprocess + + + def warp_images(self, fg_im, fg_shape, bg_im, bg_shape, fg_mask): + """ + Warp foreground face onto background image using affine or 3D warping. 
+ """ + H, W, C = bg_im.shape + use_3d_warp = np.random.rand() < 0.5 + + if not use_3d_warp: + aff_param = np.array(get_align_mat_new(fg_shape, bg_shape)).reshape(2, 3) + warped_face = cv2.warpAffine(fg_im, aff_param, (W, H), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REFLECT) + fg_mask = cv2.warpAffine(fg_mask, aff_param, (W, H), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REFLECT) + fg_mask = fg_mask > 0 + else: + warped_face = faceswap.warp_image_3d(fg_im, np.array(fg_shape[:48]), np.array(bg_shape[:48]), (H, W)) + fg_mask = np.mean(warped_face, axis=2) > 0 + + return warped_face, fg_mask + + + def colorTransfer(self, src, dst, mask): + transferredDst = np.copy(dst) + maskIndices = np.where(mask != 0) + maskedSrc = src[maskIndices[0], maskIndices[1]].astype(np.float32) + maskedDst = dst[maskIndices[0], maskIndices[1]].astype(np.float32) + + # Compute means and standard deviations + meanSrc = np.mean(maskedSrc, axis=0) + stdSrc = np.std(maskedSrc, axis=0) + meanDst = np.mean(maskedDst, axis=0) + stdDst = np.std(maskedDst, axis=0) + + # Perform color transfer + maskedDst = (maskedDst - meanDst) * (stdSrc / stdDst) + meanSrc + maskedDst = np.clip(maskedDst, 0, 255) + + # Copy the entire background into transferredDst + transferredDst = np.copy(dst) + # Now apply color transfer only to the masked region + transferredDst[maskIndices[0], maskIndices[1]] = maskedDst.astype(np.uint8) + + return transferredDst + + + def blend_images(self, color_corrected_fg, bg_im, bg_mask, featherAmount=0.2): + """ + Blend foreground and background images together. + """ + # normalize the mask to have values between 0 and 1 + b_mask = bg_mask / 255. + + # Add an extra dimension and repeat the mask to match the number of channels in color_corrected_fg and bg_im + b_mask = np.repeat(b_mask[:, :, np.newaxis], 3, axis=2) + + # Compute the alpha blending + maskIndices = np.where(b_mask != 0) + maskPts = np.hstack((maskIndices[1][:, np.newaxis], maskIndices[0][:, np.newaxis])) + + # FIXME: deal with the bugs of empty maskpts + if maskPts.size == 0: + print(f"No non-zero values found in bg_mask for blending. Skipping this image.") + return color_corrected_fg # or handle this situation differently according to the needs + + faceSize = np.max(maskPts, axis=0) - np.min(maskPts, axis=0) + featherAmount = featherAmount * np.max(faceSize) + + hull = cv2.convexHull(maskPts) + dists = np.zeros(maskPts.shape[0]) + for i in range(maskPts.shape[0]): + dists[i] = cv2.pointPolygonTest(hull, (int(maskPts[i, 0]), int(maskPts[i, 1])), True) + + weights = np.clip(dists / featherAmount, 0, 1) + + # Perform the blending operation + color_corrected_fg = color_corrected_fg.astype(float) + bg_im = bg_im.astype(float) + blended_image = np.copy(bg_im) + blended_image[maskIndices[0], maskIndices[1]] = weights[:, np.newaxis] * color_corrected_fg[maskIndices[0], maskIndices[1]] + (1 - weights[:, np.newaxis]) * bg_im[maskIndices[0], maskIndices[1]] + + # Convert the blended image to 8-bit unsigned integers + blended_image = np.clip(blended_image, 0, 255) + blended_image = blended_image.astype(np.uint8) + return blended_image + + + def process_images(self, imid_fg, imid_bg, index): + """ + Overview: + Process foreground and background images following the data generation pipeline (BI dataset). + + Terminology: + Foreground (fg) image: The image containing the face that will be blended onto the background image. + Background (bg) image: The image onto which the face from the foreground image will be blended. 
+ """ + fg_im, fg_shape, bg_im, bg_shape = self.preprocess_images(imid_fg, imid_bg) + fg_mask, bg_mask = self.generate_masks(fg_im, fg_shape, bg_im, bg_shape) + warped_face, fg_mask = self.warp_images(fg_im, fg_shape, bg_im, bg_shape, fg_mask) + + try: + # add the below two lines to make sure the bg_mask is strictly within the fg_mask + bg_mask[fg_mask == 0] = 0 + color_corrected_fg = self.colorTransfer(bg_im, warped_face, bg_mask) + blended_image = self.blend_images(color_corrected_fg, bg_im, bg_mask) + # FIXME: ugly, in order to fix the problem of mask (all zero values for bg_mask) + except: + color_corrected_fg = self.colorTransfer(bg_im, warped_face, bg_mask) + blended_image = self.blend_images(color_corrected_fg, bg_im, bg_mask) + boundary = get_boundary(bg_mask) + + # # Prepare images and titles for the combined image + # images = [fg_im, np.where(fg_mask>0, 255, 0), bg_im, bg_mask, color_corrected_fg, blended_image, np.where(boundary>0, 255, 0)] + # titles = ["Fg Image", "Fg Mask", "Bg Image", + # "Bg Mask", "Blended Region", + # "Blended Image", "Boundary"] + + # # Save the combined image + # os.makedirs('facexray_examples_3', exist_ok=True) + # self.save_combined_image(images, titles, index, f'facexray_examples_3/combined_image_{index}.png') + return blended_image, boundary, bg_im + + + def post_proc(self, img): + ''' + if self.mode == 'train': + #if np.random.rand() < 0.5: + # img = random_add_noise(img) + #add_gaussian_noise(img) + if np.random.rand() < 0.5: + #img, _ = change_res(img) + img = gaussian_blur(img) + ''' + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + im_aug = self.blended_aug(img) + im_aug = Image.fromarray(np.uint8(img)) + im_aug = self.transforms(im_aug) + return im_aug + + + @staticmethod + def save_combined_image(images, titles, index, save_path): + """ + Save the combined image with titles for each single image. + + Args: + images (List[np.ndarray]): List of images to be combined. + titles (List[str]): List of titles for each image. + index (int): Index of the image. + save_path (str): Path to save the combined image. + """ + # Determine the maximum height and width among the images + max_height = max(image.shape[0] for image in images) + max_width = max(image.shape[1] for image in images) + + # Create the canvas + canvas = np.zeros((max_height * len(images), max_width, 3), dtype=np.uint8) + + # Place the images and titles on the canvas + current_height = 0 + for image, title in zip(images, titles): + height, width = image.shape[:2] + + # Check if image has a third dimension (color channels) + if image.ndim == 2: + # If not, add a third dimension + image = np.tile(image[..., None], (1, 1, 3)) + + canvas[current_height : current_height + height, :width] = image + cv2.putText( + canvas, title, (10, current_height + 30), + cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2 + ) + current_height += height + + # Save the combined image + cv2.imwrite(save_path, canvas) + + + def __getitem__(self, index): + """ + Get an item from the dataset by index. 
+ """ + one_lmk_path = self.imid_list[index] + try: + label = 1 if one_lmk_path.split('/')[6]=='manipulated_sequences' else 0 + except Exception as e: + label = 1 if one_lmk_path.split('\\')[6] == 'manipulated_sequences' else 0 + imid_fg, imid_bg = self.get_fg_bg(one_lmk_path) + manipulate_img, boundary, imid_bg = self.process_images(imid_fg, imid_bg, index) + + manipulate_img = self.post_proc(manipulate_img) + imid_bg = self.post_proc(imid_bg) + boundary = torch.from_numpy(boundary) + boundary = boundary.unsqueeze(2).permute(2, 0, 1) + + # fake data + fake_data_tuple = (manipulate_img, boundary, 1) + # real data + real_data_tuple = (imid_bg, torch.zeros_like(boundary), label) + + return fake_data_tuple, real_data_tuple + + + @staticmethod + def collate_fn(batch): + """ + Collates batches of data and shuffles the images. + """ + # Unzip the batch + fake_data, real_data = zip(*batch) + + # Unzip the fake and real data + fake_images, fake_boundaries, fake_labels = zip(*fake_data) + real_images, real_boundaries, real_labels = zip(*real_data) + + # Combine fake and real data + images = torch.stack(fake_images + real_images) + boundaries = torch.stack(fake_boundaries + real_boundaries) + labels = torch.tensor(fake_labels + real_labels) + + # Combine images, boundaries, and labels into tuples + combined_data = list(zip(images, boundaries, labels)) + + # Shuffle the combined data + random.shuffle(combined_data) + + # Unzip the shuffled data + images, boundaries, labels = zip(*combined_data) + + # Create the data dictionary + data_dict = { + 'image': torch.stack(images), + 'label': torch.tensor(labels), + 'mask': torch.stack(boundaries), # Assuming boundaries are your masks + 'landmark': None # Add your landmark data if available + } + + return data_dict + + + def __len__(self): + """ + Get the length of the dataset. 
+ """ + return len(self.imid_list) + + +if __name__ == "__main__": + dataset = FFBlendDataset() + print('dataset lenth: ', len(dataset)) + + def tensor2bgr(im): + img = im.squeeze().cpu().numpy().transpose(1, 2, 0) + img = (img + 1)/2 * 255 + img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) + return img + + def tensor2gray(im): + img = im.squeeze().cpu().numpy() + img = img * 255 + return img + + for i, data_dict in enumerate(dataset): + if i > 20: + break + if label == 1: + if not use_mouth: + img, boudary = im + cv2.imwrite('{}_whole.png'.format(i), tensor2bgr(img)) + cv2.imwrite('{}_boudnary.png'.format(i), tensor2gray(boudary)) + else: + img, mouth, boudary = im + cv2.imwrite('{}_whole.png'.format(i), tensor2bgr(img)) + cv2.imwrite('{}_mouth.png'.format(i), tensor2bgr(mouth)) + cv2.imwrite('{}_boudnary.png'.format(i), tensor2gray(boudary)) diff --git a/training/dataset/fwa_blend.py b/training/dataset/fwa_blend.py new file mode 100644 index 0000000000000000000000000000000000000000..ad3c1df0bb65ccba4cd2d6aa7c31cc5c462c1efe --- /dev/null +++ b/training/dataset/fwa_blend.py @@ -0,0 +1,548 @@ +''' +# author: Zhiyuan Yan +# email: zhiyuanyan@link.cuhk.edu.cn +# date: 2023-03-30 + +The code is designed for FWA and mainly modified from the below link: +https://github.com/yuezunli/DSP-FWA +''' + +import os +import sys +import json +import pickle +import time + +import dlib +import numpy as np +from copy import deepcopy +import cv2 +import random +from PIL import Image +from skimage.util import random_noise +from skimage.draw import polygon +from scipy import linalg +import heapq as hq +import albumentations as A + +import torch +from torch.autograd import Variable +from torch.utils import data +from torchvision import transforms as T +import torchvision + +from dataset.utils.face_blend import * +from dataset.utils.face_align import get_align_mat_new +from dataset.utils.color_transfer import color_transfer +from dataset.utils.faceswap_utils import blendImages as alpha_blend_fea +from dataset.utils.faceswap_utils import AlphaBlend as alpha_blend +from dataset.utils.face_aug import aug_one_im, change_res +from dataset.utils.image_ae import get_pretraiend_ae +from dataset.utils.warp import warp_mask +from dataset.utils import faceswap +from scipy.ndimage.filters import gaussian_filter +from skimage.transform import AffineTransform, warp + +from dataset.abstract_dataset import DeepfakeAbstractBaseDataset + + +# Define face detector and predictor models +face_detector = dlib.get_frontal_face_detector() +predictor_path = 'preprocessing/dlib_tools/shape_predictor_81_face_landmarks.dat' +face_predictor = dlib.shape_predictor(predictor_path) + + +mean_face_x = np.array([ + 0.000213256, 0.0752622, 0.18113, 0.29077, 0.393397, 0.586856, 0.689483, 0.799124, + 0.904991, 0.98004, 0.490127, 0.490127, 0.490127, 0.490127, 0.36688, 0.426036, + 0.490127, 0.554217, 0.613373, 0.121737, 0.187122, 0.265825, 0.334606, 0.260918, + 0.182743, 0.645647, 0.714428, 0.793132, 0.858516, 0.79751, 0.719335, 0.254149, + 0.340985, 0.428858, 0.490127, 0.551395, 0.639268, 0.726104, 0.642159, 0.556721, + 0.490127, 0.423532, 0.338094, 0.290379, 0.428096, 0.490127, 0.552157, 0.689874, + 0.553364, 0.490127, 0.42689]) + +mean_face_y = np.array([ + 0.106454, 0.038915, 0.0187482, 0.0344891, 0.0773906, 0.0773906, 0.0344891, + 0.0187482, 0.038915, 0.106454, 0.203352, 0.307009, 0.409805, 0.515625, 0.587326, + 0.609345, 0.628106, 0.609345, 0.587326, 0.216423, 0.178758, 0.179852, 0.231733, + 0.245099, 0.244077, 0.231733, 0.179852, 0.178758, 0.216423, 
0.244077, 0.245099, + 0.780233, 0.745405, 0.727388, 0.742578, 0.727388, 0.745405, 0.780233, 0.864805, + 0.902192, 0.909281, 0.902192, 0.864805, 0.784792, 0.778746, 0.785343, 0.778746, + 0.784792, 0.824182, 0.831803, 0.824182]) + +landmarks_2D = np.stack([mean_face_x, mean_face_y], axis=1) + + +class RandomDownScale(A.core.transforms_interface.ImageOnlyTransform): + def apply(self,img,**params): + return self.randomdownscale(img) + + def randomdownscale(self,img): + keep_ratio=True + keep_input_shape=True + H,W,C=img.shape + ratio_list=[2,4] + r=ratio_list[np.random.randint(len(ratio_list))] + img_ds=cv2.resize(img,(int(W/r),int(H/r)),interpolation=cv2.INTER_NEAREST) + if keep_input_shape: + img_ds=cv2.resize(img_ds,(W,H),interpolation=cv2.INTER_LINEAR) + return img_ds + + +def umeyama( src, dst, estimate_scale ): + """Estimate N-D similarity transformation with or without scaling. + Parameters + ---------- + src : (M, N) array + Source coordinates. + dst : (M, N) array + Destination coordinates. + estimate_scale : bool + Whether to estimate scaling factor. + Returns + ------- + T : (N + 1, N + 1) + The homogeneous similarity transformation matrix. The matrix contains + NaN values only if the problem is not well-conditioned. + References + ---------- + .. [1] "Least-squares estimation of transformation parameters between two + point patterns", Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573 + """ + + num = src.shape[0] + dim = src.shape[1] + + # Compute mean of src and dst. + src_mean = src.mean(axis=0) + dst_mean = dst.mean(axis=0) + + # Subtract mean from src and dst. + src_demean = src - src_mean + dst_demean = dst - dst_mean + + # Eq. (38). + A = np.dot(dst_demean.T, src_demean) / num + + # Eq. (39). + d = np.ones((dim,), dtype=np.double) + if np.linalg.det(A) < 0: + d[dim - 1] = -1 + + T = np.eye(dim + 1, dtype=np.double) + + U, S, V = np.linalg.svd(A) + + # Eq. (40) and (43). + rank = np.linalg.matrix_rank(A) + if rank == 0: + return np.nan * T + elif rank == dim - 1: + if np.linalg.det(U) * np.linalg.det(V) > 0: + T[:dim, :dim] = np.dot(U, V) + else: + s = d[dim - 1] + d[dim - 1] = -1 + T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V)) + d[dim - 1] = s + else: + T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V.T)) + + if estimate_scale: + # Eq. (41) and (42). + scale = 1.0 / src_demean.var(axis=0).sum() * np.dot(S, d) + else: + scale = 1.0 + + T[:dim, dim] = dst_mean - scale * np.dot(T[:dim, :dim], src_mean.T) + T[:dim, :dim] *= scale + + return T + + +def shape_to_np(shape, dtype="int"): + # initialize the list of (x, y)-coordinates + coords = np.zeros((68, 2), dtype=dtype) + + # loop over the 68 facial landmarks and convert them + # to a 2-tuple of (x, y)-coordinates + for i in range(0, 68): + coords[i] = (shape.part(i).x, shape.part(i).y) + + # return the list of (x, y)-coordinates + return coords + + +from skimage.transform import AffineTransform, warp + +def get_warped_face(face, landmarks, tform): + """ + Apply the given affine transformation to the face and landmarks. + + Args: + face (np.ndarray): The face image to be transformed. + landmarks (np.ndarray): The facial landmarks to be transformed. + tform (AffineTransform): The transformation to apply. + + Returns: + warped_face (np.ndarray): The transformed face image. + warped_landmarks (np.ndarray): The transformed facial landmarks. 
+ """ + # Apply the transformation to the face + warped_face = warp(face, tform.inverse, output_shape=face.shape) + warped_face = (warped_face * 255).astype(np.uint8) + + # Apply the transformation to the landmarks + warped_landmarks = tform.inverse(landmarks) + + return warped_face, warped_landmarks + + +def warp_face_within_landmarks(face, landmarks, tform): + """ + Apply the given affine transformation to the face and landmarks, + and retain only the area within the landmarks. + + Args: + face (np.ndarray): The face image to be transformed. + landmarks (np.ndarray): The facial landmarks to be transformed. + tform (AffineTransform): The transformation to apply. + + Returns: + warped_face (np.ndarray): The transformed face image. + warped_landmarks (np.ndarray): The transformed facial landmarks. + """ + # Apply the transformation to the face + warped_face = warp(face, tform.inverse, output_shape=face.shape) + warped_face = (warped_face * 255).astype(np.uint8) + + # Apply the transformation to the landmarks + warped_landmarks = np.linalg.inv(landmarks) + + # Generate a mask based on the landmarks + rr, cc = polygon(warped_landmarks[:, 1], warped_landmarks[:, 0]) + mask = np.zeros_like(warped_face, dtype=np.uint8) + mask[rr, cc] = 1 + + # Apply the mask to the face + warped_face *= mask + + return warped_face, warped_landmarks + + +def get_2d_aligned_face(image, mat, size, padding=[0, 0]): + mat = mat * size + mat[0, 2] += padding[0] + mat[1, 2] += padding[1] + return cv2.warpAffine(image, mat, (size + 2 * padding[0], size + 2 * padding[1])) + + +def get_2d_aligned_landmarks(face_cache, aligned_face_size=256, padding=(0, 0)): + mat, points = face_cache + # Mapping landmarks to aligned face + pred_ = np.concatenate([points, np.ones((points.shape[0], 1))], axis=-1) + pred_ = np.transpose(pred_) + mat = mat * aligned_face_size + mat[0, 2] += padding[0] + mat[1, 2] += padding[1] + aligned_shape = np.dot(mat, pred_) + aligned_shape = np.transpose(aligned_shape[:2, :]) + return aligned_shape + + +def get_aligned_face_and_landmarks(im, face_cache, aligned_face_size = 256, padding=(0, 0)): + """ + get all aligned faces and landmarks of all images + :param imgs: origin images + :param fa: face_alignment package + :return: + """ + aligned_cur_shapes = [] + aligned_cur_im = [] + for mat, points in face_cache: + # Get transform matrix + aligned_face = get_2d_aligned_face(im, mat, aligned_face_size, padding) + aligned_shape = get_2d_aligned_landmarks([mat, points], aligned_face_size, padding) + aligned_cur_shapes.append(aligned_shape) + aligned_cur_im.append(aligned_face) + return aligned_cur_im, aligned_cur_shapes + + +def face_warp(im, face, trans_matrix, size, padding): + new_face = np.clip(face, 0, 255).astype(im.dtype) + image_size = im.shape[1], im.shape[0] + + tmp_matrix = trans_matrix * size + delta_matrix = np.array([[0., 0., padding[0]*1.0], [0., 0., padding[1]*1.0]]) + tmp_matrix = tmp_matrix + delta_matrix + + # Warp the new face onto a blank canvas + warped_face = np.zeros_like(im) + cv2.warpAffine(new_face, tmp_matrix, image_size, warped_face, cv2.WARP_INVERSE_MAP, + cv2.BORDER_TRANSPARENT) + + # Create a mask of the warped face + mask = (warped_face > 0).astype(np.uint8) + + # Blend the warped face with the original image + new_image = im * (1 - mask) + warped_face * mask + + return new_image, mask + + +def get_face_loc(im, face_detector, scale=0): + """ get face locations, color order of images is rgb """ + faces = face_detector(np.uint8(im), scale) + face_list = [] + if faces is not 
None or len(faces) > 0: + for i, d in enumerate(faces): + try: + face_list.append([d.left(), d.top(), d.right(), d.bottom()]) + except: + face_list.append([d.rect.left(), d.rect.top(), d.rect.right(), d.rect.bottom()]) + return face_list + + + +def align(im, face_detector, lmark_predictor, scale=0): + # This version we handle all faces in view + # channel order rgb + im = np.uint8(im) + faces = face_detector(im, scale) + face_list = [] + if faces is not None or len(faces) > 0: + for pred in faces: + try: + points = shape_to_np(lmark_predictor(im, pred)) + except: + points = shape_to_np(lmark_predictor(im, pred.rect)) + trans_matrix = umeyama(points[17:], landmarks_2D, True)[0:2] + face_list.append([trans_matrix, points]) + return face_list + + +class FWABlendDataset(DeepfakeAbstractBaseDataset): + def __init__(self, config=None): + super().__init__(config, mode='train') + self.transforms = T.Compose([ + T.ToTensor(), + T.Normalize(mean=config['mean'], + std=config['std']) + ]) + self.resolution = config['resolution'] + + + def blended_aug(self, im): + transform = A.Compose([ + A.RGBShift((-20,20),(-20,20),(-20,20),p=0.3), + A.HueSaturationValue(hue_shift_limit=(-0.3,0.3), sat_shift_limit=(-0.3,0.3), val_shift_limit=(-0.3,0.3), p=0.3), + A.RandomBrightnessContrast(brightness_limit=(-0.3,0.3), contrast_limit=(-0.3,0.3), p=0.3), + A.ImageCompression(quality_lower=40, quality_upper=100,p=0.5) + ]) + # Apply transformations + im_aug = transform(image=im) + return im_aug['image'] + + + def data_aug(self, im): + """ + Apply data augmentation on the input image using albumentations. + """ + transform = A.Compose([ + A.Compose([ + A.RGBShift((-20,20),(-20,20),(-20,20),p=0.3), + A.HueSaturationValue(hue_shift_limit=(-0.3,0.3), sat_shift_limit=(-0.3,0.3), val_shift_limit=(-0.3,0.3), p=1), + A.RandomBrightnessContrast(brightness_limit=(-0.1,0.1), contrast_limit=(-0.1,0.1), p=1), + ],p=1), + A.OneOf([ + RandomDownScale(p=1), + A.Sharpen(alpha=(0.2, 0.5), lightness=(0.5, 1.0), p=1), + ],p=1), + ], p=1.) 
+ # Apply transformations + im_aug = transform(image=im) + return im_aug['image'] + + + def blend_images(self, img_path): + #im = cv2.imread(img_path) + im = np.array(self.load_rgb(img_path)) + + # Get the alignment of the head + face_cache = align(im, face_detector, face_predictor) + + # Get the aligned face and landmarks + aligned_im_head, aligned_shape = get_aligned_face_and_landmarks(im, face_cache) + # If no faces were detected in the image, return None (or any suitable value) + if len(aligned_im_head) == 0 or len(aligned_shape) == 0: + return None, None + aligned_im_head = aligned_im_head[0] + aligned_shape = aligned_shape[0] + + # Apply transformations to the face + scale_factor = random.choice([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]) + scaled_face = cv2.resize(aligned_im_head, (0, 0), fx=scale_factor, fy=scale_factor) + + # Apply Gaussian blur to the scaled face + blurred_face = cv2.GaussianBlur(scaled_face, (5, 5), 0) + + # Resize the processed image back to the original size + resized_face = cv2.resize(blurred_face, (aligned_im_head.shape[1], aligned_im_head.shape[0])) + + # Generate a random facial mask + mask = get_mask(aligned_shape.astype(np.float32), resized_face, std=20, deform=True) + + # Apply the mask to the resized face + masked_face = cv2.bitwise_and(resized_face, resized_face, mask=mask) + + # do aug before warp + im = np.array(self.blended_aug(im)) + + # Warp the face back to the original image + im, masked_face = face_warp(im, masked_face, face_cache[0][0], self.resolution, [0, 0]) + shape = get_2d_aligned_landmarks(face_cache[0], self.resolution, [0, 0]) + return im, masked_face + + + def process_images(self, img_path, index): + """ + Process an image following the data generation pipeline. + """ + blended_im, mask = self.blend_images(img_path) + + # Prepare images and titles for the combined image + imid_fg = np.array(self.load_rgb(img_path)) + imid_fg = np.array(self.data_aug(imid_fg)) + + if blended_im is None or mask is None: + return imid_fg, None + + # images = [ + # imid_fg, + # np.where(mask.astype(np.uint8)>0, 255, 0), + # blended_im, + # ] + # titles = ["Image", "Mask", "Blended Image"] + + # # Save the combined image + # os.makedirs('fwa_examples_2', exist_ok=True) + # self.save_combined_image(images, titles, index, f'fwa_examples_2/combined_image_{index}.png') + return imid_fg, blended_im + + + def post_proc(self, img): + ''' + if self.mode == 'train': + #if np.random.rand() < 0.5: + # img = random_add_noise(img) + #add_gaussian_noise(img) + if np.random.rand() < 0.5: + #img, _ = change_res(img) + img = gaussian_blur(img) + ''' + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + im_aug = self.blended_aug(img) + im_aug = Image.fromarray(np.uint8(img)) + im_aug = self.transforms(im_aug) + return im_aug + + + @staticmethod + def save_combined_image(images, titles, index, save_path): + """ + Save the combined image with titles for each single image. + + Args: + images (List[np.ndarray]): List of images to be combined. + titles (List[str]): List of titles for each image. + index (int): Index of the image. + save_path (str): Path to save the combined image. 
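The `blend_images` routine above is the heart of the FWA-style data generation: a detected face is aligned, downscaled, blurred, masked, and warped back onto the frame, so that real footage picks up the resampling artifacts a face-swap pipeline would leave behind (note that `face_detector`, `face_predictor`, and `get_mask` are assumed to be defined earlier in this module and are not shown here). A compressed, standalone sketch of just the degradation step, using a plain convex-hull mask instead of the module's `get_mask` helper:

```python
import cv2
import numpy as np

def simulate_warp_artifacts(face_crop, landmarks, scale=0.5):
    """Illustrative only: downscale + blur a face and paste it back inside
    the landmark hull, mimicking the degradation applied in blend_images()."""
    h, w = face_crop.shape[:2]

    # Downscale then upscale to introduce resampling artifacts, plus a mild blur.
    small = cv2.resize(face_crop, (0, 0), fx=scale, fy=scale)
    blurred = cv2.GaussianBlur(small, (5, 5), 0)
    degraded = cv2.resize(blurred, (w, h))

    # Restrict the degraded region to the convex hull of the landmarks.
    mask = np.zeros((h, w), dtype=np.uint8)
    hull = cv2.convexHull(landmarks.astype(np.int32))
    cv2.fillConvexPoly(mask, hull, 1)
    mask3 = mask[:, :, None]

    return face_crop * (1 - mask3) + degraded * mask3
```

In the dataset class itself, the degraded face produced this way is then pasted back onto the original frame with `face_warp`, and the paired real/blended samples are returned by `__getitem__`.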
+ """ + # Determine the maximum height and width among the images + max_height = max(image.shape[0] for image in images) + max_width = max(image.shape[1] for image in images) + + # Create the canvas + canvas = np.zeros((max_height * len(images), max_width, 3), dtype=np.uint8) + + # Place the images and titles on the canvas + current_height = 0 + for image, title in zip(images, titles): + height, width = image.shape[:2] + + # Check if image has a third dimension (color channels) + if image.ndim == 2: + # If not, add a third dimension + image = np.tile(image[..., None], (1, 1, 3)) + + canvas[current_height : current_height + height, :width] = image + cv2.putText( + canvas, title, (10, current_height + 30), + cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2 + ) + current_height += height + + # Save the combined image + cv2.imwrite(save_path, canvas) + + + def __getitem__(self, index): + """ + Get an item from the dataset by index. + """ + one_img_path = self.data_dict['image'][index] + try: + label = 1 if one_img_path.split('/')[6]=='manipulated_sequences' else 0 + except Exception as e: + label = 1 if one_img_path.split('\\')[6] == 'manipulated_sequences' else 0 + blend_label = 1 + imid, manipulate_img = self.process_images(one_img_path, index) + + if manipulate_img is None: + manipulate_img = deepcopy(imid) + blend_label = label + manipulate_img = self.post_proc(manipulate_img) + imid = self.post_proc(imid) + + # blend data + fake_data_tuple = (manipulate_img, blend_label) + # original data + real_data_tuple = (imid, label) + + return fake_data_tuple, real_data_tuple + + + @staticmethod + def collate_fn(batch): + """ + Collates batches of data and shuffles the images. + """ + # Unzip the batch + fake_data, real_data = zip(*batch) + + # Unzip the fake and real data + fake_images, fake_labels = zip(*fake_data) + real_images, real_labels = zip(*real_data) + + # Combine fake and real data + images = torch.stack(fake_images + real_images) + labels = torch.tensor(fake_labels + real_labels) + + # Combine images, boundaries, and labels into tuples + combined_data = list(zip(images, labels)) + + # Shuffle the combined data + random.shuffle(combined_data) + + # Unzip the shuffled data + images, labels = zip(*combined_data) + + # Create the data dictionary + data_dict = { + 'image': torch.stack(images), + 'label': torch.tensor(labels), + 'mask': None, + 'landmark': None # Add your landmark data if available + } + + return data_dict diff --git a/training/dataset/generate_parsing_mask.py b/training/dataset/generate_parsing_mask.py new file mode 100644 index 0000000000000000000000000000000000000000..0282dd764cd262e52fdd96fc2729bc8585efccea --- /dev/null +++ b/training/dataset/generate_parsing_mask.py @@ -0,0 +1,129 @@ +''' +# author: Zhiyuan Yan +# email: zhiyuanyan@link.cuhk.edu.cn +# date: 2024-01-26 + +The code is designed for self-blending method (SBI, CVPR 2024). 
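The script below generates per-frame face-parsing masks with a SegFormer model. The checkpoint is loaded from a hard-coded local snapshot directory; if that snapshot is not available, the same weights should be obtainable from the Hugging Face Hub, since the path embeds `models--jonathandinu--face-parsing`. A hedged sketch of loading the public checkpoint and running it on one frame (the hub id and the frame path are assumptions, not taken from the original code):

```python
import torch
from PIL import Image
from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Presumed public hub id corresponding to the local snapshot used below.
MODEL_ID = "jonathandinu/face-parsing"
image_processor = SegformerImageProcessor.from_pretrained(MODEL_ID)
face_parser = SegformerForSemanticSegmentation.from_pretrained(MODEL_ID).to(device)

img = Image.open("frame_0000.png").convert("RGB")        # illustrative frame path
inputs = image_processor(images=img, return_tensors="pt").to(device)
with torch.no_grad():
    logits = face_parser(**inputs).logits                 # (1, num_labels, H/4, W/4)
```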
+''' + +import sys +sys.path.append('.') + +import os +import cv2 +import yaml +import random +import torch +import torch.nn as nn +from PIL import Image +import numpy as np +from copy import deepcopy +import albumentations as A +from training.dataset.abstract_dataset import DeepfakeAbstractBaseDataset +from training.dataset.sbi_api import SBI_API +from training.dataset.utils.bi_online_generation_yzy import random_get_hull +from training.dataset.SimSwap.test_one_image import self_blend + +import warnings +warnings.filterwarnings('ignore') + + +from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +image_processor = SegformerImageProcessor.from_pretrained("/Youtu_Pangu_Security_Public/youtu-pangu-public/zhiyuanyan/huggingface/hub/models--jonathandinu--face-parsing/snapshots/a2bf62f39dfd8f8856a3c19be8b0707a8d68abdd") +face_parser = SegformerForSemanticSegmentation.from_pretrained("/Youtu_Pangu_Security_Public/youtu-pangu-public/zhiyuanyan/huggingface/hub/models--jonathandinu--face-parsing/snapshots/a2bf62f39dfd8f8856a3c19be8b0707a8d68abdd").to(device) + + +def create_facial_mask(mask, with_neck=False): + facial_labels = [1, 2, 3, 4, 5, 6, 7, 10, 11, 12] + if with_neck: + facial_labels += [17] + facial_mask = np.zeros_like(mask, dtype=bool) + for label in facial_labels: + facial_mask |= (mask == label) + return facial_mask.astype(np.uint8) * 255 + + +def face_parsing_mask(img1, with_neck=False): + # run inference on image + img1 = Image.fromarray(img1) + inputs = image_processor(images=img1, return_tensors="pt").to(device) + outputs = face_parser(**inputs) + logits = outputs.logits # shape (batch_size, num_labels, ~height/4, ~width/4) + + # resize output to match input image dimensions + upsampled_logits = nn.functional.interpolate(logits, + size=img1.size[::-1], # H x W + mode='bilinear', + align_corners=False) + labels = upsampled_logits.argmax(dim=1)[0] + mask = labels.cpu().numpy() + mask = create_facial_mask(mask, with_neck) + return mask + + +class YZYDataset(DeepfakeAbstractBaseDataset): + def __init__(self, config=None, mode='train'): + super().__init__(config, mode) + + # Get real lists + # Fix the label of real images to be 0 + self.real_imglist = [(img, label) for img, label in zip(self.image_list, self.label_list) if label == 0] + + + def __getitem__(self, index): + # Get the real image paths and labels + real_image_path, real_label = self.real_imglist[index] + # real_image_path = real_image_path.replace('/Youtu_Pangu_Security_Public/', '/Youtu_Pangu_Security/public/') + + # Load the real images + real_image = self.load_rgb(real_image_path) + real_image = np.array(real_image) # Convert to numpy array + + # Face Parsing + mask = face_parsing_mask(real_image, with_neck=False) + parse_mask_path = real_image_path.replace('frames', 'parse_mask') + os.makedirs(os.path.dirname(parse_mask_path), exist_ok=True) + cv2.imwrite(parse_mask_path, mask) + + # # SRI generation + # sri_image = self_blend(real_image) + # sri_path = real_image_path.replace('frames', 'sri_frames') + # os.makedirs(os.path.dirname(sri_path), exist_ok=True) + # cv2.imwrite(sri_path, sri_image) + + @staticmethod + def collate_fn(batch): + data_dict = { + 'image': None, + 'label': None, + 'landmark': None, + 'mask': None, + } + return data_dict + + def __len__(self): + return len(self.real_imglist) + + + +if __name__ == '__main__': + with open('./training/config/detector/sbi.yaml', 'r') as f: + config = yaml.safe_load(f) + 
with open('./training/config/train_config.yaml', 'r') as f: + config2 = yaml.safe_load(f) + config2['data_manner'] = 'lmdb' + config['dataset_json_folder'] = '/Youtu_Pangu_Security_Public/youtu-pangu-public/zhiyuanyan/DeepfakeBenchv2/preprocessing/dataset_json' + config.update(config2) + train_set = YZYDataset(config=config, mode='train') + train_data_loader = \ + torch.utils.data.DataLoader( + dataset=train_set, + batch_size=config['train_batchSize'], + shuffle=True, + num_workers=0, + collate_fn=train_set.collate_fn, + ) + from tqdm import tqdm + for iteration, batch in enumerate(tqdm(train_data_loader)): + print(iteration) \ No newline at end of file diff --git a/training/dataset/generate_xray_nearest.py b/training/dataset/generate_xray_nearest.py new file mode 100644 index 0000000000000000000000000000000000000000..cf946f1c37e5c5cfe03a712caa9891968b9582c9 --- /dev/null +++ b/training/dataset/generate_xray_nearest.py @@ -0,0 +1,136 @@ +''' +# author: Zhiyuan Yan +# email: zhiyuanyan@link.cuhk.edu.cn +# date: 2023-03-30 + +The code is specifically designed for generating nearest sample pairs for Face X-ray. +Alternatively, you can utilize the pre-generated pkl files available in our GitHub repository. Please refer to the "Releases" section on our repository for accessing these files. +''' + +import os +import json +import pickle +import numpy as np +import heapq +import random +from tqdm import tqdm +from scipy.spatial import KDTree + + +def load_landmark(file_path): + """ + Load 2D facial landmarks from a file path. + + Args: + file_path: A string indicating the path to the landmark file. + + Returns: + A numpy array containing the loaded landmarks. + + Raises: + None. + """ + if file_path is None: + return np.zeros((81, 2)) + if os.path.exists(file_path): + landmark = np.load(file_path) + return np.float32(landmark) + else: + return np.zeros((81, 2)) + + +def get_landmark_dict(dataset_folder): + # Check if the dictionary has already been created + if os.path.exists('landmark_dict_ff.pkl'): + with open('landmark_dict_ff.pkl', 'rb') as f: + return pickle.load(f) + # Open the metadata file for the current folder + metadata_path = os.path.join(dataset_folder, "FaceForensics++.json") + with open(metadata_path, "r") as f: + metadata = json.load(f) + # Iterate over the metadata entries and add the landmark paths to the list + ff_real_data = metadata['FaceForensics++']['FF-real'] + # Using dictionary comprehension to generate the landmark_dict + landmark_dict = { + frame_path.replace('frames', 'landmarks').replace(".png", ".npy"): load_landmark( + frame_path.replace('frames', 'landmarks').replace(".png", ".npy") + ) + for mode, value in ff_real_data.items() + for video_name, video_info in tqdm(value['c23'].items()) + for frame_path in video_info['frames'] + } + # Save the dictionary to a pickle file + with open('landmark_dict_ffall.pkl', 'wb') as f: + pickle.dump(landmark_dict, f) + return landmark_dict + + +def get_nearest_faces_fixed_pair(landmark_info, num_neighbors): + ''' + Using KDTree to find the nearest faces for each image (Much faster!!) 
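The idea, illustrated with a toy example (synthetic landmarks, not project data): each 81-point landmark array is flattened to a 162-dimensional vector, and a KDTree query with `k = num_neighbors + 1` returns the face itself plus its nearest neighbours, so the first index is dropped:

```python
import numpy as np
from scipy.spatial import KDTree

rng = np.random.default_rng(0)
toy_landmarks = {f"face_{i}.npy": rng.random((81, 2), dtype=np.float32) for i in range(5)}

X = np.array([lmk.flatten() for lmk in toy_landmarks.values()])  # shape (5, 162)
tree = KDTree(X)

dists, indices = tree.query(X[0], k=3)      # itself + two nearest neighbours
neighbours = [list(toy_landmarks.keys())[i] for i in indices[1:]]
print(neighbours, dists[1:])
```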
+ ''' + random.seed(1024) # Fix the random seed for reproducibility + + # Check if the dictionary has already been created + if os.path.exists('nearest_face_info.pkl'): + with open('nearest_face_info.pkl', 'rb') as f: + return pickle.load(f) + + landmarks_array = np.array([lmk.flatten() for lmk in landmark_info.values()]) + landmark_ids = list(landmark_info.keys()) + + # Build a KDTree using the flattened landmarks + tree = KDTree(landmarks_array) + + nearest_faces = {} + for idx, this_lmk in tqdm(enumerate(landmarks_array), total=len(landmarks_array)): + # Query the KDTree for the nearest neighbors (excluding itself) + dists, indices = tree.query(this_lmk, k=num_neighbors + 1) + # Randomly pick one from the nearest N neighbors (excluding itself) + picked_idx = random.choice(indices[1:]) + nearest_faces[landmark_ids[idx]] = landmark_ids[picked_idx] + + # Save the dictionary to a pickle file + with open('nearest_face_info.pkl', 'wb') as f: + pickle.dump(nearest_faces, f) + + return nearest_faces + + +def get_nearest_faces(landmark_info, num_neighbors): + ''' + Using KDTree to find the nearest faces for each image (Much faster!!) + ''' + random.seed(1024) # Fix the random seed for reproducibility + + # Check if the dictionary has already been created + if os.path.exists('nearest_face_info.pkl'): + with open('nearest_face_info.pkl', 'rb') as f: + return pickle.load(f) + + landmarks_array = np.array([lmk.flatten() for lmk in landmark_info.values()]) + landmark_ids = list(landmark_info.keys()) + + # Build a KDTree using the flattened landmarks + tree = KDTree(landmarks_array) + + nearest_faces = {} + for idx, this_lmk in tqdm(enumerate(landmarks_array), total=len(landmarks_array)): + # Query the KDTree for the nearest neighbors (excluding itself) + dists, indices = tree.query(this_lmk, k=num_neighbors + 1) + # Store the nearest N neighbors (excluding itself) + nearest_faces[landmark_ids[idx]] = [landmark_ids[i] for i in indices[1:]] + + # Save the dictionary to a pickle file + with open('nearest_face_info.pkl', 'wb') as f: + pickle.dump(nearest_faces, f) + + return nearest_faces + +# Load the landmark dictionary and obtain the landmark dict +dataset_folder = "/home/zhiyuanyan/disfin/deepfake_benchmark/preprocessing/dataset_json/" +landmark_info = get_landmark_dict(dataset_folder) + +# Get the nearest faces for each image (in landmark_dict) +num_neighbors = 100 +nearest_faces_info = get_nearest_faces(landmark_info, num_neighbors) # running time: about 20 mins diff --git a/training/dataset/iid_dataset.py b/training/dataset/iid_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..97a3935ef34b92d7f3beffb70256c270fe0ad9dc --- /dev/null +++ b/training/dataset/iid_dataset.py @@ -0,0 +1,116 @@ +''' +# author: Zhiyuan Yan +# email: zhiyuanyan@link.cuhk.edu.cn +# date: 2023-03-30 + +The code is designed for scenarios such as disentanglement-based methods where it is necessary to ensure an equal number of positive and negative samples. 
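For context on the script above: downstream BI / Face X-ray generation looks up a donor face for each real frame from the saved `nearest_face_info.pkl`. A hedged sketch of that lookup (the path handling mirrors `get_landmark_dict`, which derives landmark paths from frame paths; the actual consumer code is not shown in this file):

```python
import pickle
import random

with open('nearest_face_info.pkl', 'rb') as f:
    nearest_faces = pickle.load(f)          # {landmark_path: [neighbour landmark paths]}

anchor_lmk = random.choice(list(nearest_faces.keys()))
donor_lmk = random.choice(nearest_faces[anchor_lmk])

# Frame paths mirror the landmark paths used as keys in get_landmark_dict().
anchor_frame = anchor_lmk.replace('landmarks', 'frames').replace('.npy', '.png')
donor_frame = donor_lmk.replace('landmarks', 'frames').replace('.npy', '.png')
```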
+'''
+import os.path
+from copy import deepcopy
+import cv2
+import math
+import torch
+import random
+
+import yaml
+from PIL import Image, ImageDraw
+import numpy as np
+from torch.utils.data import DataLoader
+
+from dataset.abstract_dataset import DeepfakeAbstractBaseDataset
+
+class IIDDataset(DeepfakeAbstractBaseDataset):
+    def __init__(self, config=None, mode='train'):
+        super().__init__(config, mode)
+
+
+    def __getitem__(self, index):
+        # Get the image paths and label
+        image_path = self.data_dict['image'][index]
+        if '\\' in image_path:
+            per = image_path.split('\\')[-2]
+        else:
+            per = image_path.split('/')[-2]
+        id_index = int(per.split('_')[-1])  # real video id
+        label = self.data_dict['label'][index]
+
+        # Load the image
+        try:
+            image = self.load_rgb(image_path)
+        except Exception as e:
+            # Skip this image and return the first one
+            print(f"Error loading image at index {index}: {e}")
+            return self.__getitem__(0)
+        image = np.array(image)  # Convert to numpy array for data augmentation
+
+        # Do Data Augmentation
+        image_trans, _, _ = self.data_aug(image)
+
+        # To tensor and normalize
+        image_trans = self.normalize(self.to_tensor(image_trans))
+
+        return id_index, image_trans, label
+
+    @staticmethod
+    def collate_fn(batch):
+        """
+        Collate a batch of data points.
+
+        Args:
+            batch (list): A list of tuples containing the image tensor, the label tensor,
+                the landmark tensor, and the mask tensor.
+
+        Returns:
+            A tuple containing the image tensor, the label tensor, the landmark tensor,
+            and the mask tensor.
+        """
+        # Separate the image, label, landmark, and mask tensors
+        id_indexes, image_trans, label = zip(*batch)
+
+        # Stack the image, label, landmark, and mask tensors
+        images = torch.stack(image_trans, dim=0)
+        labels = torch.LongTensor(label)
+        ids = torch.LongTensor(id_indexes)
+        # Create a dictionary of the tensors
+        data_dict = {}
+        data_dict['image'] = images
+        data_dict['label'] = labels
+        data_dict['id_index'] = ids
+        data_dict['mask'] = None
+        data_dict['landmark'] = None
+        return data_dict
+
+
+def draw_landmark(img, landmark):
+    draw = ImageDraw.Draw(img)
+
+    # landmark = np.stack([mean_face_x, mean_face_y], axis=1)
+    # landmark *= 256
+    # Iterate over every facial landmark
+    for i, point in enumerate(landmark):
+        # Mark the landmark on the image
+        draw.ellipse((point[0] - 1, point[1] - 1, point[0] + 1, point[1] + 1), fill=(255, 0, 0))
+        # Write the landmark index next to the point
+        draw.text((point[0], point[1]), str(i), fill=(255, 255, 255))
+    return img
+
+
+if __name__ == '__main__':
+    detector_path = r"./training/config/detector/xception.yaml"
+    # weights_path = "./ckpts/xception/CDFv2/tb_v1/ov.pth"
+    with open(detector_path, 'r') as f:
+        config = yaml.safe_load(f)
+    with open('./training/config/train_config.yaml', 'r') as f:
+        config2 = yaml.safe_load(f)
+    config2['data_manner'] = 'lmdb'
+    config['dataset_json_folder'] = 'preprocessing/dataset_json_v3'
+    config.update(config2)
+    dataset = IIDDataset(config=config)
+    batch_size = 2
+    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=dataset.collate_fn)
+
+    for i, batch in enumerate(dataloader):
+        print(f"Batch {i}: {batch}")
+
+        # The collated batch is a dictionary; the image tensor is retrieved like this:
+        img = batch['image']
diff --git a/training/dataset/library/000_0000.png b/training/dataset/library/000_0000.png
new file mode 100644
index 0000000000000000000000000000000000000000..bbb4211f6a0dfe83bb27dea42cf77efccab066db
Binary files /dev/null and b/training/dataset/library/000_0000.png differ
diff --git a/training/dataset/library/001_0000.png b/training/dataset/library/001_0000.png
new file mode 
100644 index 0000000000000000000000000000000000000000..cb749e2f540ea96be8252a900ae827883ffeed53 Binary files /dev/null and b/training/dataset/library/001_0000.png differ diff --git a/training/dataset/library/DeepFakeMask.py b/training/dataset/library/DeepFakeMask.py new file mode 100644 index 0000000000000000000000000000000000000000..3ad16cab208910d64476753d82735d79b3571ee3 --- /dev/null +++ b/training/dataset/library/DeepFakeMask.py @@ -0,0 +1,181 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- +# Created by: algohunt +# Microsoft Research & Peking University +# lilingzhi@pku.edu.cn +# Copyright (c) 2019 + +#!/usr/bin/env python3 +""" Masks functions for faceswap.py """ + +import inspect +import logging +import sys + +import cv2 +import numpy as np + +# logger = logging.getLogger(__name__) # pylint: disable=invalid-name + + +def get_available_masks(): + """ Return a list of the available masks for cli """ + masks = sorted([name for name, obj in inspect.getmembers(sys.modules[__name__]) + if inspect.isclass(obj) and name != "Mask"]) + masks.append("none") + # logger.debug(masks) + return masks + + +def get_default_mask(): + """ Set the default mask for cli """ + masks = get_available_masks() + default = "dfl_full" + default = default if default in masks else masks[0] + # logger.debug(default) + return default + + +class Mask(): + """ Parent class for masks + the output mask will be .mask + channels: 1, 3 or 4: + 1 - Returns a single channel mask + 3 - Returns a 3 channel mask + 4 - Returns the original image with the mask in the alpha channel """ + + def __init__(self, landmarks, face, channels=4): + # logger.info("Initializing %s: (face_shape: %s, channels: %s, landmarks: %s)", + # self.__class__.__name__, face.shape, channels, landmarks) + self.landmarks = landmarks + self.face = face + self.channels = channels + + mask = self.build_mask() + self.mask = self.merge_mask(mask) + #logger.info("Initialized %s", self.__class__.__name__) + + def build_mask(self): + """ Override to build the mask """ + raise NotImplementedError + + def merge_mask(self, mask): + """ Return the mask in requested shape """ + #logger.info("mask_shape: %s", mask.shape) + assert self.channels in (1, 3, 4), "Channels should be 1, 3 or 4" + assert mask.shape[2] == 1 and mask.ndim == 3, "Input mask be 3 dimensions with 1 channel" + + if self.channels == 3: + retval = np.tile(mask, 3) + elif self.channels == 4: + retval = np.concatenate((self.face, mask), -1) + else: + retval = mask + + #logger.info("Final mask shape: %s", retval.shape) + return retval + + +class dfl_full(Mask): # pylint: disable=invalid-name + """ DFL facial mask """ + def build_mask(self): + mask = np.zeros(self.face.shape[0:2] + (1, ), dtype=np.float32) + + nose_ridge = (self.landmarks[27:31], self.landmarks[33:34]) + jaw = (self.landmarks[0:17], + self.landmarks[48:68], + self.landmarks[0:1], + self.landmarks[8:9], + self.landmarks[16:17]) + eyes = (self.landmarks[17:27], + self.landmarks[0:1], + self.landmarks[27:28], + self.landmarks[16:17], + self.landmarks[33:34]) + parts = [jaw, nose_ridge, eyes] + + for item in parts: + merged = np.concatenate(item) + cv2.fillConvexPoly(mask, cv2.convexHull(merged), 255.) 
# pylint: disable=no-member + return mask + + +class components(Mask): # pylint: disable=invalid-name + """ Component model mask """ + def build_mask(self): + mask = np.zeros(self.face.shape[0:2] + (1, ), dtype=np.float32) + + r_jaw = (self.landmarks[0:9], self.landmarks[17:18]) + l_jaw = (self.landmarks[8:17], self.landmarks[26:27]) + r_cheek = (self.landmarks[17:20], self.landmarks[8:9]) + l_cheek = (self.landmarks[24:27], self.landmarks[8:9]) + nose_ridge = (self.landmarks[19:25], self.landmarks[8:9],) + r_eye = (self.landmarks[17:22], + self.landmarks[27:28], + self.landmarks[31:36], + self.landmarks[8:9]) + l_eye = (self.landmarks[22:27], + self.landmarks[27:28], + self.landmarks[31:36], + self.landmarks[8:9]) + nose = (self.landmarks[27:31], self.landmarks[31:36]) + parts = [r_jaw, l_jaw, r_cheek, l_cheek, nose_ridge, r_eye, l_eye, nose] + + for item in parts: + merged = np.concatenate(item) + cv2.fillConvexPoly(mask, cv2.convexHull(merged), 255.) # pylint: disable=no-member + return mask + + +class extended(Mask): # pylint: disable=invalid-name + """ Extended mask + Based on components mask. Attempts to extend the eyebrow points up the forehead + """ + def build_mask(self): + mask = np.zeros(self.face.shape[0:2] + (1, ), dtype=np.float32) + + landmarks = self.landmarks.copy() + # mid points between the side of face and eye point + ml_pnt = (landmarks[36] + landmarks[0]) // 2 + mr_pnt = (landmarks[16] + landmarks[45]) // 2 + + # mid points between the mid points and eye + ql_pnt = (landmarks[36] + ml_pnt) // 2 + qr_pnt = (landmarks[45] + mr_pnt) // 2 + + # Top of the eye arrays + bot_l = np.array((ql_pnt, landmarks[36], landmarks[37], landmarks[38], landmarks[39])) + bot_r = np.array((landmarks[42], landmarks[43], landmarks[44], landmarks[45], qr_pnt)) + + # Eyebrow arrays + top_l = landmarks[17:22] + top_r = landmarks[22:27] + + # Adjust eyebrow arrays + landmarks[17:22] = top_l + ((top_l - bot_l) // 2) + landmarks[22:27] = top_r + ((top_r - bot_r) // 2) + + r_jaw = (landmarks[0:9], landmarks[17:18]) + l_jaw = (landmarks[8:17], landmarks[26:27]) + r_cheek = (landmarks[17:20], landmarks[8:9]) + l_cheek = (landmarks[24:27], landmarks[8:9]) + nose_ridge = (landmarks[19:25], landmarks[8:9],) + r_eye = (landmarks[17:22], landmarks[27:28], landmarks[31:36], landmarks[8:9]) + l_eye = (landmarks[22:27], landmarks[27:28], landmarks[31:36], landmarks[8:9]) + nose = (landmarks[27:31], landmarks[31:36]) + parts = [r_jaw, l_jaw, r_cheek, l_cheek, nose_ridge, r_eye, l_eye, nose] + + for item in parts: + merged = np.concatenate(item) + cv2.fillConvexPoly(mask, cv2.convexHull(merged), 255.) # pylint: disable=no-member + return mask + + +class facehull(Mask): # pylint: disable=invalid-name + """ Basic face hull mask """ + def build_mask(self): + mask = np.zeros(self.face.shape[0:2] + (1, ), dtype=np.float32) + hull = cv2.convexHull( # pylint: disable=no-member + np.array(self.landmarks).reshape((-1, 2))) + cv2.fillConvexPoly(mask, hull, 255.0, lineType=cv2.LINE_AA) # pylint: disable=no-member + return mask \ No newline at end of file diff --git a/training/dataset/library/LICENSE b/training/dataset/library/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7 --- /dev/null +++ b/training/dataset/library/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. 
+ Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. 
+ + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. 
You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. 
+ + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. 
+ + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. 
+ + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". 
+ + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. diff --git a/training/dataset/library/README.md b/training/dataset/library/README.md new file mode 100644 index 0000000000000000000000000000000000000000..28ec640b626e03f2daabfe2e9b95dac0c1720004 --- /dev/null +++ b/training/dataset/library/README.md @@ -0,0 +1,12 @@ +# Face-X-ray +The author's unofficial PyTorch re-implementation of Face Xray + +This repo contains code for the BI data generation pipeline from [Face X-ray for More General Face Forgery Detection](https://arxiv.org/abs/1912.13458) by Lingzhi Li, Jianmin Bao, Ting Zhang, Hao Yang, Dong Chen, Fang Wen, Baining Guo. + +# Usage + +Just run bi_online_generation.py and you can get the following result. which is describe at Figure.5 in the paper. + +![demo](all_in_one.jpg) + +To get the whole BI dataset, you will need crop all the face and compute the landmarks as describe in the code. \ No newline at end of file diff --git a/training/dataset/library/all_in_one.jpg b/training/dataset/library/all_in_one.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6f698b006872d2281bb759fd2b504bc3690ed856 Binary files /dev/null and b/training/dataset/library/all_in_one.jpg differ diff --git a/training/dataset/library/bi_online_generation.py b/training/dataset/library/bi_online_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..98ad61229ca907f4d9f2e867756efd9d1f5940da --- /dev/null +++ b/training/dataset/library/bi_online_generation.py @@ -0,0 +1,241 @@ +import dlib +from skimage import io +from skimage import transform as sktransform +import numpy as np +from matplotlib import pyplot as plt +import json +import os +import random +from PIL import Image +from imgaug import augmenters as iaa +from .DeepFakeMask import dfl_full,facehull,components,extended +import cv2 +import tqdm + +def name_resolve(path): + name = os.path.splitext(os.path.basename(path))[0] + vid_id, frame_id = name.split('_')[0:2] + return vid_id, frame_id + +def total_euclidean_distance(a,b): + assert len(a.shape) == 2 + return np.sum(np.linalg.norm(a-b,axis=1)) + +def random_get_hull(landmark,img1,hull_type): + if hull_type == 0: + mask = dfl_full(landmarks=landmark.astype('int32'),face=img1, channels=3).mask + return mask/255 + elif hull_type == 1: + mask = extended(landmarks=landmark.astype('int32'),face=img1, channels=3).mask + return mask/255 + elif hull_type == 2: + mask = components(landmarks=landmark.astype('int32'),face=img1, channels=3).mask + return mask/255 + elif hull_type == 3: + mask = facehull(landmarks=landmark.astype('int32'),face=img1, channels=3).mask + return mask/255 + +def random_erode_dilate(mask, ksize=None): + if random.random()>0.5: + if ksize is None: + ksize = random.randint(1,21) + if ksize % 2 == 0: + ksize += 1 + mask = np.array(mask).astype(np.uint8)*255 + kernel = np.ones((ksize,ksize),np.uint8) + mask = cv2.erode(mask,kernel,1)/255 + else: + if ksize is None: + ksize = random.randint(1,5) + if ksize % 2 
== 0: + ksize += 1 + mask = np.array(mask).astype(np.uint8)*255 + kernel = np.ones((ksize,ksize),np.uint8) + mask = cv2.dilate(mask,kernel,1)/255 + return mask + + +# borrow from https://github.com/MarekKowalski/FaceSwap +def blendImages(src, dst, mask, featherAmount=0.2): + + maskIndices = np.where(mask != 0) + + src_mask = np.ones_like(mask) + dst_mask = np.zeros_like(mask) + + maskPts = np.hstack((maskIndices[1][:, np.newaxis], maskIndices[0][:, np.newaxis])) + faceSize = np.max(maskPts, axis=0) - np.min(maskPts, axis=0) + featherAmount = featherAmount * np.max(faceSize) + + hull = cv2.convexHull(maskPts) + dists = np.zeros(maskPts.shape[0]) + for i in range(maskPts.shape[0]): + dists[i] = cv2.pointPolygonTest(hull, (maskPts[i, 0], maskPts[i, 1]), True) + + weights = np.clip(dists / featherAmount, 0, 1) + + composedImg = np.copy(dst) + composedImg[maskIndices[0], maskIndices[1]] = weights[:, np.newaxis] * src[maskIndices[0], maskIndices[1]] + (1 - weights[:, np.newaxis]) * dst[maskIndices[0], maskIndices[1]] + + composedMask = np.copy(dst_mask) + composedMask[maskIndices[0], maskIndices[1]] = weights[:, np.newaxis] * src_mask[maskIndices[0], maskIndices[1]] + ( + 1 - weights[:, np.newaxis]) * dst_mask[maskIndices[0], maskIndices[1]] + + return composedImg, composedMask + + +# borrow from https://github.com/MarekKowalski/FaceSwap +def colorTransfer(src, dst, mask): + transferredDst = np.copy(dst) + + maskIndices = np.where(mask != 0) + + + maskedSrc = src[maskIndices[0], maskIndices[1]].astype(np.int32) + maskedDst = dst[maskIndices[0], maskIndices[1]].astype(np.int32) + + meanSrc = np.mean(maskedSrc, axis=0) + meanDst = np.mean(maskedDst, axis=0) + + maskedDst = maskedDst - meanDst + maskedDst = maskedDst + meanSrc + maskedDst = np.clip(maskedDst, 0, 255) + + transferredDst[maskIndices[0], maskIndices[1]] = maskedDst + + return transferredDst + +class BIOnlineGeneration(): + def __init__(self): + with open('precomuted_landmarks.json', 'r') as f: + self.landmarks_record = json.load(f) + for k,v in self.landmarks_record.items(): + self.landmarks_record[k] = np.array(v) + # extract all frame from all video in the name of {videoid}_{frameid} + self.data_list = [ + '000_0000.png', + '001_0000.png' + ] * 10000 + + # predefine mask distortion + self.distortion = iaa.Sequential([iaa.PiecewiseAffine(scale=(0.01, 0.15))]) + + def gen_one_datapoint(self): + background_face_path = random.choice(self.data_list) + data_type = 'real' if random.randint(0,1) else 'fake' + if data_type == 'fake' : + face_img,mask = self.get_blended_face(background_face_path) + mask = ( 1 - mask ) * mask * 4 + else: + face_img = io.imread(background_face_path) + mask = np.zeros((317, 317, 1)) + + # randomly downsample after BI pipeline + if random.randint(0,1): + aug_size = random.randint(64, 317) + face_img = Image.fromarray(face_img) + if random.randint(0,1): + face_img = face_img.resize((aug_size, aug_size), Image.BILINEAR) + else: + face_img = face_img.resize((aug_size, aug_size), Image.NEAREST) + face_img = face_img.resize((317, 317),Image.BILINEAR) + face_img = np.array(face_img) + + # random jpeg compression after BI pipeline + if random.randint(0,1): + quality = random.randint(60, 100) + encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality] + face_img_encode = cv2.imencode('.jpg', face_img, encode_param)[1] + face_img = cv2.imdecode(face_img_encode, cv2.IMREAD_COLOR) + + face_img = face_img[60:317,30:287,:] + mask = mask[60:317,30:287,:] + + # random flip + if random.randint(0,1): + face_img = 
np.flip(face_img,1) + mask = np.flip(mask,1) + + return face_img,mask,data_type + + def get_blended_face(self,background_face_path): + background_face = io.imread(background_face_path) + background_landmark = self.landmarks_record[background_face_path] + + foreground_face_path = self.search_similar_face(background_landmark,background_face_path) + foreground_face = io.imread(foreground_face_path) + + # down sample before blending + aug_size = random.randint(128,317) + background_landmark = background_landmark * (aug_size/317) + foreground_face = sktransform.resize(foreground_face,(aug_size,aug_size),preserve_range=True).astype(np.uint8) + background_face = sktransform.resize(background_face,(aug_size,aug_size),preserve_range=True).astype(np.uint8) + + # get random type of initial blending mask + mask = random_get_hull(background_landmark, background_face) + + # random deform mask + mask = self.distortion.augment_image(mask) + mask = random_erode_dilate(mask) + + # filte empty mask after deformation + if np.sum(mask) == 0 : + raise NotImplementedError + + # apply color transfer + foreground_face = colorTransfer(background_face, foreground_face, mask*255) + + # blend two face + blended_face, mask = blendImages(foreground_face, background_face, mask*255) + blended_face = blended_face.astype(np.uint8) + + # resize back to default resolution + blended_face = sktransform.resize(blended_face,(317,317),preserve_range=True).astype(np.uint8) + mask = sktransform.resize(mask,(317,317),preserve_range=True) + mask = mask[:,:,0:1] + return blended_face,mask + + def search_similar_face(self,this_landmark,background_face_path): + vid_id, frame_id = name_resolve(background_face_path) + min_dist = 99999999 + + # random sample 5000 frame from all frams: + all_candidate_path = random.sample( self.data_list, k=5000) + + # filter all frame that comes from the same video as background face + all_candidate_path = filter(lambda k:name_resolve(k)[0] != vid_id, all_candidate_path) + all_candidate_path = list(all_candidate_path) + + # loop throungh all candidates frame to get best match + for candidate_path in all_candidate_path: + candidate_landmark = self.landmarks_record[candidate_path].astype(np.float32) + candidate_distance = total_euclidean_distance(candidate_landmark, this_landmark) + if candidate_distance < min_dist: + min_dist = candidate_distance + min_path = candidate_path + + return min_path + +if __name__ == '__main__': + ds = BIOnlineGeneration() + from tqdm import tqdm + all_imgs = [] + for _ in tqdm(range(50)): + img,mask,label = ds.gen_one_datapoint() + mask = np.repeat(mask,3,2) + mask = (mask*255).astype(np.uint8) + img_cat = np.concatenate([img,mask],1) + all_imgs.append(img_cat) + all_in_one = Image.new('RGB', (2570,2570)) + + for x in range(5): + for y in range(10): + idx = x*10+y + im = Image.fromarray(all_imgs[idx]) + + dx = x*514 + dy = y*257 + + all_in_one.paste(im, (dx,dy)) + + all_in_one.save("all_in_one.jpg") \ No newline at end of file diff --git a/training/dataset/library/precomuted_landmarks.json b/training/dataset/library/precomuted_landmarks.json new file mode 100644 index 0000000000000000000000000000000000000000..63f32d5b6bef1a0f7718b7c632b96ce584c978cb --- /dev/null +++ b/training/dataset/library/precomuted_landmarks.json @@ -0,0 +1 @@ +{"000_0000.png": [[56, 143], [57, 168], [61, 192], [67, 216], [76, 238], [93, 257], [112, 273], [133, 288], [156, 291], [178, 287], [198, 271], [219, 256], [236, 237], [246, 216], [250, 192], [252, 167], [252, 142], [69, 131], [84, 123], [102, 123], 
[119, 126], [137, 132], [178, 130], [195, 122], [213, 119], [230, 119], [244, 126], [158, 149], [158, 168], [158, 186], [159, 205], [140, 211], [148, 214], [158, 219], [168, 214], [176, 210], [91, 150], [102, 143], [116, 144], [127, 154], [115, 156], [101, 156], [188, 152], [199, 142], [213, 141], [224, 148], [214, 153], [201, 154], [117, 232], [134, 229], [148, 228], [158, 231], [168, 228], [181, 229], [195, 232], [182, 246], [169, 253], [158, 254], [147, 254], [132, 247], [125, 234], [147, 238], [158, 239], [168, 237], [188, 234], [168, 237], [158, 239], [147, 238]], "001_0000.png": [[56, 143], [57, 168], [61, 192], [67, 216], [76, 238], [93, 257], [112, 273], [133, 288], [156, 291], [178, 287], [198, 271], [219, 256], [236, 237], [246, 216], [250, 192], [252, 167], [252, 142], [69, 131], [84, 123], [102, 123], [119, 126], [137, 132], [178, 130], [195, 122], [213, 119], [230, 119], [244, 126], [158, 149], [158, 168], [158, 186], [159, 205], [140, 211], [148, 214], [158, 219], [168, 214], [176, 210], [91, 150], [102, 143], [116, 144], [127, 154], [115, 156], [101, 156], [188, 152], [199, 142], [213, 141], [224, 148], [214, 153], [201, 154], [117, 232], [134, 229], [148, 228], [158, 231], [168, 228], [181, 229], [195, 232], [182, 246], [169, 253], [158, 254], [147, 254], [132, 247], [125, 234], [147, 238], [158, 239], [168, 237], [188, 234], [168, 237], [158, 239], [147, 238]]} \ No newline at end of file diff --git a/training/dataset/lrl_dataset.py b/training/dataset/lrl_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..d80323a0206c086bd6e5b00c6ebbfcd2b97bb2db --- /dev/null +++ b/training/dataset/lrl_dataset.py @@ -0,0 +1,139 @@ +import os +import sys +current_file_path = os.path.abspath(__file__) +parent_dir = os.path.dirname(os.path.dirname(current_file_path)) +project_root_dir = os.path.dirname(parent_dir) +sys.path.append(parent_dir) +sys.path.append(project_root_dir) + +import cv2 +import random +import yaml +import torch +import numpy as np +from copy import deepcopy +import albumentations as A +from .abstract_dataset import DeepfakeAbstractBaseDataset +from PIL import Image + +c=0 + +class LRLDataset(DeepfakeAbstractBaseDataset): + def __init__(self, config=None, mode='train'): + super().__init__(config, mode) + global c + c=config + + def multi_pass_filter(self, img, r1=0.33, r2=0.66): + rows, cols = img.shape + k = cols / rows + + mask = np.zeros((rows, cols), np.uint8) + x, y = np.ogrid[:rows, :cols] + mask_area = (k * x + y < r1 * cols) + mask[mask_area] = 1 + low_mask = mask + + mask = np.ones((rows, cols), np.uint8) + x, y = np.ogrid[:rows, :cols] + mask_area = (k * x + y < r2 * cols) + mask[mask_area] = 0 + high_mask = mask + + mask1 = np.zeros((rows, cols), np.uint8) + mask1[low_mask == 0] = 1 + mask2 = np.zeros((rows, cols), np.uint8) + mask2[high_mask == 0] = 1 + mid_mask = mask1 * mask2 + + return low_mask, mid_mask, high_mask + + def image2dct(self,img): + img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) + img_gray = np.float32(img_gray) + img_dct = cv2.dct(img_gray) + # img_dct = np.log(np.abs(img_dct)+1e-6) + + low_mask, mid_mask, high_mask = self.multi_pass_filter(img_dct, r1=0.33, r2=0.33) + img_dct_filterd = high_mask * img_dct + img_idct = cv2.idct(img_dct_filterd) + + return img_idct + + def __getitem__(self, index): + image_trans, label, landmark_tensors, mask_trans = super().__getitem__(index, no_norm=True) + + img_idct = self.image2dct(image_trans) + # normalize idct + img_idct = (img_idct / 255 - 0.5) / 0.5 + # img_idct = 
img_idct[np.newaxis, ...] + + # To tensor and normalize for fake and real images + image_trans = self.normalize(self.to_tensor(image_trans)) + img_idct_trans = self.to_tensor(img_idct) + mask_trans = torch.from_numpy(mask_trans) + mask_trans = mask_trans.squeeze(2).permute(2, 0, 1) + mask_trans = torch.mean(mask_trans, dim=0, keepdim=True) + return image_trans, label, img_idct_trans, mask_trans + + def __len__(self): + return len(self.image_list) + + + @staticmethod + def collate_fn(batch): + """ + Collate a batch of data points. + + Args: + batch (list): A list of tuples containing the image tensor and label tensor. + + Returns: + A tuple containing the image tensor, the label tensor, the landmark tensor, + and the mask tensor. + """ + global c + images, labels, img_idct_trans, masks = zip(*batch) + # Stack the image, label, landmark, and mask tensors + images = torch.stack(images, dim=0) + labels = torch.LongTensor(labels) + masks = torch.stack(masks, dim=0) + img_idct_trans = torch.stack(img_idct_trans, dim=0) + + data_dict = { + 'image': images, + 'label': labels, + 'landmark': None, + 'idct': img_idct_trans, + 'mask': masks, + } + return data_dict + + + +if __name__ == '__main__': + with open(r'H:\code\DeepfakeBench\training\config\detector\lrl_effnb4.yaml', 'r') as f: + config = yaml.safe_load(f) + with open(r'H:\code\DeepfakeBench\training\config\train_config.yaml', 'r') as f: + config2 = yaml.safe_load(f) + random.seed(config['manualSeed']) + torch.manual_seed(config['manualSeed']) + if config['cuda']: + torch.cuda.manual_seed_all(config['manualSeed']) + config2['data_manner'] = 'lmdb' + config['dataset_json_folder'] = 'preprocessing/dataset_json_v3' + config.update(config2) + train_set = LRLDataset(config=config, mode='train') + train_data_loader = \ + torch.utils.data.DataLoader( + dataset=train_set, + batch_size=4, + shuffle=True, + num_workers=0, + collate_fn=train_set.collate_fn, + ) + from tqdm import tqdm + for iteration, batch in enumerate(tqdm(train_data_loader)): + print(iteration) + if iteration > 10: + break \ No newline at end of file diff --git a/training/dataset/lsda_dataset.py b/training/dataset/lsda_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..03ac7b1be198ea4c266ff82951d71b8b685fa3eb --- /dev/null +++ b/training/dataset/lsda_dataset.py @@ -0,0 +1,382 @@ +import sys +sys.path.append('.') + +import os +import sys +import json +import math +import yaml + +import numpy as np +import cv2 +import random +from PIL import Image + +import torch +from torch.autograd import Variable +from torch.utils import data +from torchvision import transforms as T + + +import skimage.draw +import albumentations as alb +from albumentations import Compose, RandomBrightnessContrast, \ + HorizontalFlip, FancyPCA, HueSaturationValue, OneOf, ToGray, \ + ShiftScaleRotate, ImageCompression, PadIfNeeded, GaussNoise, GaussianBlur, RandomResizedCrop +from torch.utils.data.sampler import Sampler +from .abstract_dataset import DeepfakeAbstractBaseDataset + + +private_path_prefix = '/home/zhaokangran/cvpr24/training' + +fake_dict = { + 'real': 0, + 'Deepfakes': 1, + 'Face2Face': 2, + 'FaceSwap': 3, + 'NeuralTextures': 4, + # 'Deepfakes_Face2Face': 5, + # 'Deepfakes_FaceSwap': 6, + # 'Deepfakes_NeuralTextures': 7, + # 'Deepfakes_real': 8, + # 'Face2Face_FaceSwap': 9, + # 'Face2Face_NeuralTextures': 10, + # 'Face2Face_real': 11, + # 'FaceSwap_NeuralTextures': 12, + # 'FaceSwap_real': 13, + # 'NeuralTextures_real': 14, +} + + + +class 
RandomDownScale(alb.core.transforms_interface.ImageOnlyTransform): + def apply(self,img,**params): + return self.randomdownscale(img) + + def randomdownscale(self,img): + keep_ratio=True + keep_input_shape=True + H,W,C=img.shape + ratio_list=[2,4] + r=ratio_list[np.random.randint(len(ratio_list))] + img_ds=cv2.resize(img,(int(W/r),int(H/r)),interpolation=cv2.INTER_NEAREST) + if keep_input_shape: + img_ds=cv2.resize(img_ds,(W,H),interpolation=cv2.INTER_LINEAR) + + return img_ds + + +augmentation_methods = alb.Compose([ + # alb.RandomBrightnessContrast(brightness_limit=(-0.1,0.1), contrast_limit=(-0.1,0.1), p=0.5), + # HorizontalFlip(p=0.5), + # RandomDownScale(p=0.5), + # alb.Sharpen(alpha=(0.2, 0.5), lightness=(0.5, 1.0), p=0.5), + alb.ImageCompression(quality_lower=40,quality_upper=100,p=0.5), + GaussianBlur(blur_limit=[3, 7], p=0.5) +], p=1.) + +augmentation_methods2 = alb.Compose([ + alb.RandomBrightnessContrast(brightness_limit=(-0.1,0.1), contrast_limit=(-0.1,0.1), p=0.5), + HorizontalFlip(p=0.5), + RandomDownScale(p=0.5), + alb.Sharpen(alpha=(0.2, 0.5), lightness=(0.5, 1.0), p=0.5), + alb.ImageCompression(quality_lower=40,quality_upper=100,p=0.5), +], +additional_targets={f'image1':'image', f'image2':'image', f'image3':'image', f'image4':'image'}, +p=1.) + +normalize = T.Normalize(mean=[0.5, 0.5, 0.5], + std =[0.5, 0.5, 0.5]) +transforms1 = T.Compose([ + T.ToTensor(), + normalize + ]) + +#========================================== + +def load_rgb(file_path, size=256): + assert os.path.exists(file_path), f"{file_path} is not exists" + img = cv2.imread(file_path) + if img is None: + raise ValueError('Img is None: {}'.format(file_path)) + + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = cv2.resize(img, (size, size), interpolation=cv2.INTER_CUBIC) + + return Image.fromarray(np.array(img, dtype=np.uint8)) + + +def load_mask(file_path, size=256): + mask = cv2.imread(file_path, 0) + if mask is None: + mask = np.zeros((size, size)) + + mask = cv2.resize(mask, (size, size))/255 + mask = np.expand_dims(mask, axis=2) + return np.float32(mask) + + +def add_gaussian_noise(ins, mean=0, stddev=0.1): + noise = ins.data.new(ins.size()).normal_(mean, stddev) + return torch.clamp(ins + noise, -1, 1) + + +# class RandomBlur(object): +# """ Randomly blur an image +# """ +# def __init__(self, ratio,) + +# class RandomCompression(object): +# """ Randomly compress an image +# """ + +class CustomSampler(Sampler): + def __init__(self, num_groups=2*360, n_frame_per_vid=32, videos_per_group=5, batch_size=10): + self.num_groups = num_groups + self.n_frame_per_vid = n_frame_per_vid + self.videos_per_group = videos_per_group + self.batch_size = batch_size + assert self.batch_size % self.videos_per_group == 0, "Batch size should be a multiple of videos_per_group." 
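+        # Layout note (illustrative, not part of the original sampler logic):
+        # __iter__ below addresses a frame as
+        #   index = group * videos_per_group * n_frame_per_vid
+        #           + video_offset * n_frame_per_vid + frame_idx
+        # e.g. with videos_per_group=5 and n_frame_per_vid=32, group 2,
+        # video 3, frame 7 maps to 2*160 + 3*32 + 7 = 423. All videos in a
+        # group share the same frame_idx, so each batch yields aligned
+        # real/fake frames drawn from the same group.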
+ self.groups_per_batch = self.batch_size // self.videos_per_group + + def __iter__(self): + group_indices = list(range(self.num_groups)) + random.shuffle(group_indices) + + # For each batch + for i in range(0, len(group_indices), self.groups_per_batch): + selected_groups = group_indices[i:i+self.groups_per_batch] + + # For each group + for group in selected_groups: + frame_idx = random.randint(0, self.n_frame_per_vid - 1) # Random frame index for this group's videos + + # Return the frame for each video in this group using the same frame_idx + for video_offset in range(self.videos_per_group): + yield group * self.videos_per_group * self.n_frame_per_vid + video_offset * self.n_frame_per_vid + frame_idx + + def __len__(self): + return self.num_groups * self.videos_per_group # Total frames + + + +class LSDADataset(DeepfakeAbstractBaseDataset): + + on_3060 = "3060" in torch.cuda.get_device_name() + transfer_dict = { + 'youtube':'FF-real', + 'Deepfakes':'FF-DF', + 'Face2Face':'FF-F2F', + 'FaceSwap':'FF-FS', + 'NeuralTextures':'FF-NT' + + + } + if on_3060: + data_root = r'F:\Datasets\rgb\FaceForensics++' + else: + data_root = r'./datasets/FaceForensics++' + data_list = { + 'test': r'./datasets/FaceForensics++/test.json', + 'train': r'./datasets/FaceForensics++/train.json', + 'eval': r'./datasets/FaceForensics++/val.json' + } + + def __init__(self, config=None, mode='train', with_dataset=['Deepfakes', 'Face2Face', 'FaceSwap', 'NeuralTextures']): + super().__init__(config, mode) + self.mode = mode + self.res = config['resolution'] + self.fake_dict = fake_dict + # transform + self.normalize = T.Normalize(mean=config['mean'], + std =config['std']) + # data aug and transform + self.transforms1 = T.Compose([ + T.ToTensor(), + self.normalize + ]) + self.img_lines = [] + self.config=config + with open(self.config['dataset_json_folder']+'/FaceForensics++.json', 'r') as fd: + self.img_json = json.load(fd) + with open(self.data_list[mode], 'r') as fd: + data = json.load(fd) + img_lines = [] + for pair in data: + r1, r2 = pair + step = 1 + # collect a group with 1+len(fakes) videos, each video has self.frames[mode] frames。这里就是按同一个video这种顺序来存的,所以读的时候自然只要有了offset,就能对应的取了 + #此外,这里面存的压根就不是路径,而是规范化的内容。 + for i in range(0, config['frame_num'][mode], step): + # collect real data here(r1) + img_lines.append(('{}/{}'.format('youtube', r1), i, 0, mode)) + + for fake_d in with_dataset: + # collect fake data here(r1_r2 * 4) + for i in range(0, config['frame_num'][mode], step): + img_lines.append( + ('{}/{}_{}'.format(fake_d, r1, r2), i, self.fake_dict[fake_d], mode)) + + for i in range(0, config['frame_num'][mode], step): + # collect real data here(r2) + img_lines.append(('{}/{}'.format('youtube', r2), i, 0, mode)) + + for fake_d in with_dataset: + # collect fake data here(r2_r1 * 4) + for i in range(0, config['frame_num'][mode], step): + img_lines.append( + ('{}/{}_{}'.format(fake_d, r2, r1), i, self.fake_dict[fake_d], mode)) + + # 2*360 (groups) * 1+len(with_dataset) (videos in each group) * self.frames[mode] (frames in each video) + assert len(img_lines) == 2*len(data) * (1 + len(with_dataset)) * config['frame_num'][mode], "to match our custom sampler, the length should be 2*360*(1+len(with_dataset))*frames[mode]" + self.img_lines.extend(img_lines) + + + def get_ids_from_path(self, path): + parts = path.split('/') + try: + if 'youtube' in path: + return [int(parts[-1])] + else: + return list(map(int, parts[-1].split('_'))) + except: + raise ValueError("wrong path: {}".format(path)) + + def load_image(self, name, 
idx): + instance_type, video_name = name.split('/') + #其实并没有完全对应,而只是保证在同一video的目标时间区间内的一帧 + all_frames = self.img_json[self.data_root.split(os.path.sep)[-1]][self.transfer_dict[instance_type]]['train']['c23'][video_name]['frames'] + img_path = all_frames[idx] + + impath = img_path + img = self.load_rgb(impath) + return img + + def __getitem__(self, index): + name, idx, label, mode = self.img_lines[index] #这个sampler的目的是不要取重复video的图。 + label = int(label) # specific fake label from 1-4 + + #取img没什么好说的。然后在这里把规范化的img_lines转为实际路径。 + try: + img = self.load_image(name, idx) + except Exception as e: + # 下面处理不太合适,取的不是预期的video_id/fake_method,影响后面的lsda。 + # random_idx = random.randint(0, len(self.img_lines)-1) + # print(f'Error loading image {name} at index {idx} due to the loading error. Try another one at index {random_idx}') + # return self.__getitem__(random_idx) + + #边界条件判断,取同video的。 + if idx==0: + new_index = index+1 + elif idx==31: + new_index = index-1 + else: + new_index = index + random.choice([-1,1]) # 通过随机防止死递归 + print(f'Error loading image {name} at index {idx} due to the loading error. Try another one at index {new_index}') + return self.__getitem__(new_index) + + + if self.mode=='train': + # do augmentation + img = np.asarray(img) # convert PIL to numpy + + img = augmentation_methods2(image=img)['image'] + img = Image.fromarray(np.array(img, dtype=np.uint8)) # covnert numpy to PIL + + # transform with PIL as input + img = self.transforms1(img) + else: + raise ValueError("Not implemented yet") + + return (img, label) + + + + def __len__(self): + return len(self.img_lines) + + + + @staticmethod + def collate_fn(batch): + # Unzip the batch into images and labels + images, labels = zip(*batch) + + # images, labels = zip(batch['image'], batch['label']) + + # image_list = [] + + # for i in range(len(images)//5): + + # img = images[i*5:(i+1)*5] + + # # do augmentation + # imgs_aug = augmentation_methods2(image=np.asarray(img[0]), image1=np.asarray(img[1]), image2=np.asarray(img[2]), image3=np.asarray(img[3]), image4=np.asarray(img[4])) + # for k in imgs_aug: + + # img_aug = Image.fromarray(np.array(imgs_aug[k], dtype=np.uint8)) # covnert numpy to PIL + + # # transform with PIL as input + # img_aug = transforms1(img_aug) + # image_list.append(img_aug) + + # Stack the images and labels + images = torch.stack(images, dim=0) # Shape: (batch_size, c, h, w) + labels = torch.tensor(labels, dtype=torch.long) + + bs, c, h, w = images.shape + + # Assume videos_per_group is 5 in our case + videos_per_group = 5 + num_groups = bs // videos_per_group + + # Reshape to get the group dimension: (num_groups, videos_per_group, c, h, w) + images_grouped = images.view(num_groups, videos_per_group, c, h, w) + labels_grouped = labels.view(num_groups, videos_per_group) + + valid_indices = [] + for i, group in enumerate(labels_grouped): + if set(group.numpy().tolist()) == {0, 1, 2, 3, 4}: + valid_indices.append(i) + # elif set(group.numpy().tolist()) == {0, 1, 2, 3}: + # valid_indices.append(i) + # elif set(group.numpy().tolist()) == {0, 1, 2, 3, 4, 5}: + # valid_indices.append(i) + + images_grouped = images_grouped[valid_indices] + labels_grouped = labels_grouped[valid_indices] + + if not valid_indices: + raise ValueError("No valid groups found in this batch.") + + # # Shuffle the video order within each group + # for i in range(num_groups): + # perm = torch.randperm(videos_per_group) + # images_grouped[i] = images_grouped[i, perm] + # labels_grouped[i] = labels_grouped[i, perm] + + # # Flatten back to original shape 
but with shuffled video order + # images_shuffled = images_grouped.view(num_groups, videos_per_group, c, h, w) + # labels_shuffled = labels_grouped.view(bs) + + return {'image': images_grouped, 'label': labels_grouped, 'mask': None, 'landmark': None} + + +if __name__ == '__main__': + with open('/data/home/zhiyuanyan/DeepfakeBench/training/config/detector/lsda.yaml', 'r') as f: + config = yaml.safe_load(f) + train_set = LSDADataset(config=config, mode='train') + custom_sampler = CustomSampler(num_groups=2*360, n_frame_per_vid=config['frame_num']['train'], batch_size=config['train_batchSize'], videos_per_group=5) + train_data_loader = \ + torch.utils.data.DataLoader( + dataset=train_set, + batch_size=config['train_batchSize'], + num_workers=0, + sampler=custom_sampler, + collate_fn=train_set.collate_fn, + ) + from tqdm import tqdm + for iteration, batch in enumerate(tqdm(train_data_loader)): + print(iteration) + if iteration > 10: + break \ No newline at end of file diff --git a/training/dataset/pair_dataset.py b/training/dataset/pair_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..ce3b676e70f215d0261dff5339d2bf56a1f67f96 --- /dev/null +++ b/training/dataset/pair_dataset.py @@ -0,0 +1,150 @@ +''' +# author: Zhiyuan Yan +# email: zhiyuanyan@link.cuhk.edu.cn +# date: 2023-03-30 + +The code is designed for scenarios such as disentanglement-based methods where it is necessary to ensure an equal number of positive and negative samples. +''' + +import torch +import random +import numpy as np +from dataset.abstract_dataset import DeepfakeAbstractBaseDataset + + +class pairDataset(DeepfakeAbstractBaseDataset): + def __init__(self, config=None, mode='train'): + super().__init__(config, mode) + + # Get real and fake image lists + # Fix the label of real images to be 0 and fake images to be 1 + self.fake_imglist = [(img, label, 1) for img, label in zip(self.image_list, self.label_list) if label != 0] + self.real_imglist = [(img, label, 0) for img, label in zip(self.image_list, self.label_list) if label == 0] + + def __getitem__(self, index, norm=True): + # Get the fake and real image paths and labels + fake_image_path, fake_spe_label, fake_label = self.fake_imglist[index] + real_index = random.randint(0, len(self.real_imglist) - 1) # Randomly select a real image + real_image_path, real_spe_label, real_label = self.real_imglist[real_index] + + # Get the mask and landmark paths for fake and real images + fake_mask_path = fake_image_path.replace('frames', 'masks') + fake_landmark_path = fake_image_path.replace('frames', 'landmarks').replace('.png', '.npy') + + real_mask_path = real_image_path.replace('frames', 'masks') + real_landmark_path = real_image_path.replace('frames', 'landmarks').replace('.png', '.npy') + + # Load the fake and real images + fake_image = self.load_rgb(fake_image_path) + real_image = self.load_rgb(real_image_path) + + fake_image = np.array(fake_image) # Convert to numpy array for data augmentation + real_image = np.array(real_image) # Convert to numpy array for data augmentation + + # Load mask and landmark (if needed) for fake and real images + if self.config['with_mask']: + fake_mask = self.load_mask(fake_mask_path) + real_mask = self.load_mask(real_mask_path) + else: + fake_mask, real_mask = None, None + + if self.config['with_landmark']: + fake_landmarks = self.load_landmark(fake_landmark_path) + real_landmarks = self.load_landmark(real_landmark_path) + else: + fake_landmarks, real_landmarks = None, None + + # Do transforms for fake and real 
images + fake_image_trans, fake_landmarks_trans, fake_mask_trans = self.data_aug(fake_image, fake_landmarks, fake_mask) + real_image_trans, real_landmarks_trans, real_mask_trans = self.data_aug(real_image, real_landmarks, real_mask) + + if not norm: + return {"fake": (fake_image_trans, fake_label), + "real": (real_image_trans, real_label)} + + # To tensor and normalize for fake and real images + fake_image_trans = self.normalize(self.to_tensor(fake_image_trans)) + real_image_trans = self.normalize(self.to_tensor(real_image_trans)) + + # Convert landmarks and masks to tensors if they exist + if self.config['with_landmark']: + fake_landmarks_trans = torch.from_numpy(fake_landmarks_trans) + real_landmarks_trans = torch.from_numpy(real_landmarks_trans) + if self.config['with_mask']: + fake_mask_trans = torch.from_numpy(fake_mask_trans) + real_mask_trans = torch.from_numpy(real_mask_trans) + + return {"fake": (fake_image_trans, fake_label, fake_spe_label, fake_landmarks_trans, fake_mask_trans), + "real": (real_image_trans, real_label, real_spe_label, real_landmarks_trans, real_mask_trans)} + + def __len__(self): + return len(self.fake_imglist) + + @staticmethod + def collate_fn(batch): + """ + Collate a batch of data points. + + Args: + batch (list): A list of tuples containing the image tensor, the label tensor, + the landmark tensor, and the mask tensor. + + Returns: + A tuple containing the image tensor, the label tensor, the landmark tensor, + and the mask tensor. + """ + # Separate the image, label, landmark, and mask tensors for fake and real data + fake_images, fake_labels, fake_spe_labels, fake_landmarks, fake_masks = zip(*[data["fake"] for data in batch]) + real_images, real_labels, real_spe_labels, real_landmarks, real_masks = zip(*[data["real"] for data in batch]) + + # Stack the image, label, landmark, and mask tensors for fake and real data + fake_images = torch.stack(fake_images, dim=0) + fake_labels = torch.LongTensor(fake_labels) + fake_spe_labels = torch.LongTensor(fake_spe_labels) + real_images = torch.stack(real_images, dim=0) + real_labels = torch.LongTensor(real_labels) + real_spe_labels = torch.LongTensor(real_spe_labels) + + # Special case for landmarks and masks if they are None + if fake_landmarks[0] is not None: + fake_landmarks = torch.stack(fake_landmarks, dim=0) + else: + fake_landmarks = None + if real_landmarks[0] is not None: + real_landmarks = torch.stack(real_landmarks, dim=0) + else: + real_landmarks = None + + if fake_masks[0] is not None: + fake_masks = torch.stack(fake_masks, dim=0) + else: + fake_masks = None + if real_masks[0] is not None: + real_masks = torch.stack(real_masks, dim=0) + else: + real_masks = None + + # Combine the fake and real tensors and create a dictionary of the tensors + images = torch.cat([real_images, fake_images], dim=0) + labels = torch.cat([real_labels, fake_labels], dim=0) + spe_labels = torch.cat([real_spe_labels, fake_spe_labels], dim=0) + + if fake_landmarks is not None and real_landmarks is not None: + landmarks = torch.cat([real_landmarks, fake_landmarks], dim=0) + else: + landmarks = None + + if fake_masks is not None and real_masks is not None: + masks = torch.cat([real_masks, fake_masks], dim=0) + else: + masks = None + + data_dict = { + 'image': images, + 'label': labels, + 'label_spe': spe_labels, + 'landmark': landmarks, + 'mask': masks + } + return data_dict + diff --git a/training/dataset/sbi_api.py b/training/dataset/sbi_api.py new file mode 100644 index 
0000000000000000000000000000000000000000..461f9b6ab9c3b7c9d26aef1c8428cd01f90c9e4e --- /dev/null +++ b/training/dataset/sbi_api.py @@ -0,0 +1,371 @@ +# Created by: Kaede Shiohara +# Yamasaki Lab at The University of Tokyo +# shiohara@cvm.t.u-tokyo.ac.jp +# Copyright (c) 2021 +# 3rd party softwares' licenses are noticed at https://github.com/mapooon/SelfBlendedImages/blob/master/LICENSE + +import torch +from torchvision import datasets,transforms,utils +from torch.utils.data import Dataset,IterableDataset +from glob import glob +import os +import numpy as np +from PIL import Image +import random +import cv2 +from torch import nn +import sys +import scipy as sp +from skimage.measure import label, regionprops +from training.dataset.library.bi_online_generation import random_get_hull +import albumentations as alb + +import warnings +warnings.filterwarnings('ignore') + + +def alpha_blend(source,target,mask): + mask_blured = get_blend_mask(mask) + img_blended=(mask_blured * source + (1 - mask_blured) * target) + return img_blended,mask_blured + + +def dynamic_blend(source,target,mask): + mask_blured = get_blend_mask(mask) + blend_list=[0.25,0.5,0.75,1,1,1] + blend_ratio = blend_list[np.random.randint(len(blend_list))] + mask_blured*=blend_ratio + img_blended=(mask_blured * source + (1 - mask_blured) * target) + return img_blended,mask_blured + + +def get_blend_mask(mask): + H,W=mask.shape + size_h=np.random.randint(192,257) + size_w=np.random.randint(192,257) + mask=cv2.resize(mask,(size_w,size_h)) + kernel_1=random.randrange(5,26,2) + kernel_1=(kernel_1,kernel_1) + kernel_2=random.randrange(5,26,2) + kernel_2=(kernel_2,kernel_2) + + mask_blured = cv2.GaussianBlur(mask, kernel_1, 0) + mask_blured = mask_blured/(mask_blured.max()) + mask_blured[mask_blured<1]=0 + + mask_blured = cv2.GaussianBlur(mask_blured, kernel_2, np.random.randint(5,46)) + mask_blured = mask_blured/(mask_blured.max()) + mask_blured = cv2.resize(mask_blured,(W,H)) + return mask_blured.reshape((mask_blured.shape+(1,))) + + +def get_alpha_blend_mask(mask): + kernel_list=[(11,11),(9,9),(7,7),(5,5),(3,3)] + blend_list=[0.25,0.5,0.75] + kernel_idxs=random.choices(range(len(kernel_list)), k=2) + blend_ratio = blend_list[random.sample(range(len(blend_list)), 1)[0]] + mask_blured = cv2.GaussianBlur(mask, kernel_list[0], 0) + # print(mask_blured.max()) + mask_blured[mask_blured0]=1 + # mask_blured = mask + mask_blured = cv2.GaussianBlur(mask_blured, kernel_list[kernel_idxs[1]], 0) + mask_blured = mask_blured/(mask_blured.max()) + return mask_blured.reshape((mask_blured.shape+(1,))) + + +class RandomDownScale(alb.core.transforms_interface.ImageOnlyTransform): + def apply(self,img,**params): + return self.randomdownscale(img) + + def randomdownscale(self,img): + keep_ratio=True + keep_input_shape=True + H,W,C=img.shape + ratio_list=[2,4] + r=ratio_list[np.random.randint(len(ratio_list))] + img_ds=cv2.resize(img,(int(W/r),int(H/r)),interpolation=cv2.INTER_NEAREST) + if keep_input_shape: + img_ds=cv2.resize(img_ds,(W,H),interpolation=cv2.INTER_LINEAR) + + return img_ds + + + +def get_boundary(mask, apply_dilation=True, apply_motion_blur=True): + if len(mask.shape) == 3: + mask = mask[:, :, 0] + + mask = cv2.GaussianBlur(mask, (3, 3), 0) + if mask.max() > 1: + boundary = mask / 255. + else: + boundary = mask + boundary = 4 * boundary * (1. - boundary) + + boundary = boundary * 255 + boundary = random_dilate(boundary) + + if apply_motion_blur: + boundary = random_motion_blur(boundary) + boundary = boundary / 255. 
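+    # Descriptive note: the map returned below comes from 4*m*(1-m), which
+    # peaks where the blend mask is around 0.5, so it highlights the blending
+    # seam; the random dilation and optional motion blur above thicken and
+    # smear that seam before it is rescaled back to [0, 1].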
+ return boundary + +def random_dilate(mask, max_kernel_size=5): + kernel_size = random.randint(1, max_kernel_size) + kernel = np.ones((kernel_size, kernel_size), np.uint8) + dilated_mask = cv2.dilate(mask, kernel, iterations=1) + return dilated_mask + +def random_motion_blur(mask, max_kernel_size=5): + kernel_size = random.randint(1, max_kernel_size) + kernel = np.zeros((kernel_size, kernel_size)) + anchor = random.randint(0, kernel_size - 1) + kernel[:, anchor] = 1 / kernel_size + motion_blurred_mask = cv2.filter2D(mask, -1, kernel) + return motion_blurred_mask + + + +class SBI_API: + def __init__(self,phase='train',image_size=256): + + assert phase == 'train', f"Current SBI API only support train phase, but got {phase}" + + self.image_size=(image_size,image_size) + self.phase=phase + + self.transforms=self.get_transforms() + self.source_transforms = self.get_source_transforms() + self.bob_transforms = self.get_source_transforms_for_bob() + + + def __call__(self,img,landmark=None): + try: + assert landmark is not None, "landmark of the facial image should not be None." + # img_r,img_f,mask_f=self.self_blending(img.copy(),landmark.copy()) + + if random.random() < 1.0: + # apply sbi + img_r,img_f,mask_f=self.self_blending(img.copy(),landmark.copy()) + else: + # apply boundary motion blur (bob) + img_r,img_f,mask_f=self.bob(img.copy(),landmark.copy()) + + if self.phase=='train': + transformed=self.transforms(image=img_f.astype('uint8'),image1=img_r.astype('uint8')) + img_f=transformed['image'] + img_r=transformed['image1'] + return img_f,img_r + except Exception as e: + print(e) + return None,None + + + def get_source_transforms(self): + return alb.Compose([ + alb.Compose([ + alb.RGBShift((-20,20),(-20,20),(-20,20),p=0.3), + alb.HueSaturationValue(hue_shift_limit=(-0.3,0.3), sat_shift_limit=(-0.3,0.3), val_shift_limit=(-0.3,0.3), p=1), + alb.RandomBrightnessContrast(brightness_limit=(-0.1,0.1), contrast_limit=(-0.1,0.1), p=1), + ],p=1), + + alb.OneOf([ + RandomDownScale(p=1), + alb.Sharpen(alpha=(0.2, 0.5), lightness=(0.5, 1.0), p=1), + ],p=1), + + ], p=1.) + + + def get_transforms(self): + return alb.Compose([ + + alb.RGBShift((-20,20),(-20,20),(-20,20),p=0.3), + alb.HueSaturationValue(hue_shift_limit=(-0.3,0.3), sat_shift_limit=(-0.3,0.3), val_shift_limit=(-0.3,0.3), p=0.3), + alb.RandomBrightnessContrast(brightness_limit=(-0.3,0.3), contrast_limit=(-0.3,0.3), p=0.3), + alb.ImageCompression(quality_lower=40,quality_upper=100,p=0.5), + + ], + additional_targets={f'image1': 'image'}, + p=1.) + + + def randaffine(self,img,mask): + f=alb.Affine( + translate_percent={'x':(-0.03,0.03),'y':(-0.015,0.015)}, + scale=[0.95,1/0.95], + fit_output=False, + p=1) + + g=alb.ElasticTransform( + alpha=50, + sigma=7, + alpha_affine=0, + p=1, + ) + + transformed=f(image=img,mask=mask) + img=transformed['image'] + + mask=transformed['mask'] + transformed=g(image=img,mask=mask) + mask=transformed['mask'] + return img,mask + + + def get_source_transforms_for_bob(self): + return alb.Compose([ + alb.Compose([ + alb.ImageCompression(quality_lower=40,quality_upper=100,p=1), + ],p=1), + + alb.OneOf([ + RandomDownScale(p=1), + alb.Sharpen(alpha=(0.2, 0.5), lightness=(0.5, 1.0), p=1), + ],p=1), + + ], p=1.) + + def bob(self,img,landmark): + H,W=len(img),len(img[0]) + if np.random.rand()<0.25: + landmark=landmark[:68] + # mask=np.zeros_like(img[:,:,0]) + # cv2.fillConvexPoly(mask, cv2.convexHull(landmark), 1.) 
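+        # Descriptive note: rather than the single convex-hull mask commented
+        # out above, one of four hull variants is drawn at random; in
+        # random_get_hull (library/bi_online_generation.py) indices 0-3 map to
+        # the dfl_full, extended, components and facehull masks respectively.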
+ hull_type = random.choice([0, 1, 2, 3]) + mask=random_get_hull(landmark,img,hull_type)[:,:,0] + + source = img.copy() + source = self.bob_transforms(image=source.astype(np.uint8))['image'] + source, mask = self.randaffine(source,mask) + mask = get_blend_mask(mask) + + # get boundary with motion blur + boundary = get_boundary(mask) + + blend_list = [0.25,0.5,0.75,1,1,1] + blend_ratio = blend_list[np.random.randint(len(blend_list))] + boundary *= blend_ratio + boundary = np.repeat(boundary[:, :, np.newaxis], 3, axis=2) + img_blended = (boundary * source + (1 - boundary) * img) + + img_blended = img_blended.astype(np.uint8) + img = img.astype(np.uint8) + + return img,img_blended,boundary.squeeze() + + + def self_blending(self,img,landmark): + H,W=len(img),len(img[0]) + if np.random.rand()<0.25: + landmark=landmark[:68] + # mask=np.zeros_like(img[:,:,0]) + # cv2.fillConvexPoly(mask, cv2.convexHull(landmark), 1.) + hull_type = random.choice([0, 1, 2, 3]) + mask=random_get_hull(landmark,img,hull_type)[:,:,0] + + source = img.copy() + if np.random.rand()<0.5: + source = self.source_transforms(image=source.astype(np.uint8))['image'] + else: + img = self.source_transforms(image=img.astype(np.uint8))['image'] + + source, mask = self.randaffine(source,mask) + + img_blended,mask=dynamic_blend(source,img,mask) + img_blended = img_blended.astype(np.uint8) + img = img.astype(np.uint8) + + return img,img_blended,mask + + + def reorder_landmark(self,landmark): + landmark_add=np.zeros((13,2)) + for idx,idx_l in enumerate([77,75,76,68,69,70,71,80,72,73,79,74,78]): + landmark_add[idx]=landmark[idx_l] + landmark[68:]=landmark_add + return landmark + + + def hflip(self,img,mask=None,landmark=None,bbox=None): + H,W=img.shape[:2] + landmark=landmark.copy() + if bbox is not None: + bbox=bbox.copy() + + if landmark is not None: + landmark_new=np.zeros_like(landmark) + + + landmark_new[:17]=landmark[:17][::-1] + landmark_new[17:27]=landmark[17:27][::-1] + + landmark_new[27:31]=landmark[27:31] + landmark_new[31:36]=landmark[31:36][::-1] + + landmark_new[36:40]=landmark[42:46][::-1] + landmark_new[40:42]=landmark[46:48][::-1] + + landmark_new[42:46]=landmark[36:40][::-1] + landmark_new[46:48]=landmark[40:42][::-1] + + landmark_new[48:55]=landmark[48:55][::-1] + landmark_new[55:60]=landmark[55:60][::-1] + + landmark_new[60:65]=landmark[60:65][::-1] + landmark_new[65:68]=landmark[65:68][::-1] + if len(landmark)==68: + pass + elif len(landmark)==81: + landmark_new[68:81]=landmark[68:81][::-1] + else: + raise NotImplementedError + landmark_new[:,0]=W-landmark_new[:,0] + + else: + landmark_new=None + + if bbox is not None: + bbox_new=np.zeros_like(bbox) + bbox_new[0,0]=bbox[1,0] + bbox_new[1,0]=bbox[0,0] + bbox_new[:,0]=W-bbox_new[:,0] + bbox_new[:,1]=bbox[:,1].copy() + if len(bbox)>2: + bbox_new[2,0]=W-bbox[3,0] + bbox_new[2,1]=bbox[3,1] + bbox_new[3,0]=W-bbox[2,0] + bbox_new[3,1]=bbox[2,1] + bbox_new[4,0]=W-bbox[4,0] + bbox_new[4,1]=bbox[4,1] + bbox_new[5,0]=W-bbox[6,0] + bbox_new[5,1]=bbox[6,1] + bbox_new[6,0]=W-bbox[5,0] + bbox_new[6,1]=bbox[5,1] + else: + bbox_new=None + + if mask is not None: + mask=mask[:,::-1] + else: + mask=None + img=img[:,::-1].copy() + return img,mask,landmark_new,bbox_new + + +if __name__=='__main__': + seed=10 + random.seed(seed) + torch.manual_seed(seed) + np.random.seed(seed) + torch.cuda.manual_seed(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + api=SBI_API(phase='train',image_size=256) + + img_path = 
'FaceForensics++/original_sequences/youtube/c23/frames/000/000.png' + img = cv2.imread(img_path) + landmark_path = img_path.replace('frames', 'landmarks').replace('png', 'npy') + landmark = np.load(landmark_path) + sbi_img, ori_img = api(img, landmark) diff --git a/training/dataset/sbi_dataset.py b/training/dataset/sbi_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..d39e38d2ac9f599067cb4efe4cef063eb74eee35 --- /dev/null +++ b/training/dataset/sbi_dataset.py @@ -0,0 +1,139 @@ +''' +# author: Zhiyuan Yan +# email: zhiyuanyan@link.cuhk.edu.cn +# date: 2024-01-26 + +The code is designed for self-blending method (SBI, CVPR 2024). +''' + +import sys +sys.path.append('.') + +import cv2 +import yaml +import torch +import numpy as np +from copy import deepcopy +import albumentations as A +from training.dataset.albu import IsotropicResize +from training.dataset.abstract_dataset import DeepfakeAbstractBaseDataset +from training.dataset.sbi_api import SBI_API + + +class SBIDataset(DeepfakeAbstractBaseDataset): + def __init__(self, config=None, mode='train'): + super().__init__(config, mode) + + # Get real lists + # Fix the label of real images to be 0 + self.real_imglist = [(img, label) for img, label in zip(self.image_list, self.label_list) if label == 0] + + # Init SBI + self.sbi = SBI_API(phase=mode,image_size=config['resolution']) + + # Init data augmentation method + self.transform = self.init_data_aug_method() + + def __getitem__(self, index): + # Get the real image paths and labels + real_image_path, real_label = self.real_imglist[index] + + # Get the landmark paths for real images + real_landmark_path = real_image_path.replace('frames', 'landmarks').replace('.png', '.npy') + landmark = self.load_landmark(real_landmark_path).astype(np.int32) + + # Load the real images + real_image = self.load_rgb(real_image_path) + real_image = np.array(real_image) # Convert to numpy array + + # Generate the corresponding SBI sample + fake_image, real_image = self.sbi(real_image, landmark) + if fake_image is None: + fake_image = deepcopy(real_image) + fake_label = 0 + else: + fake_label = 1 + + # To tensor and normalize for fake and real images + fake_image_trans = self.normalize(self.to_tensor(fake_image)) + real_image_trans = self.normalize(self.to_tensor(real_image)) + + return {"fake": (fake_image_trans, fake_label), + "real": (real_image_trans, real_label)} + + def __len__(self): + return len(self.real_imglist) + + @staticmethod + def collate_fn(batch): + """ + Collate a batch of data points. + + Args: + batch (list): A list of tuples containing the image tensor and label tensor. + + Returns: + A tuple containing the image tensor, the label tensor, the landmark tensor, + and the mask tensor. 
+ """ + # Separate the image, label, landmark, and mask tensors for fake and real data + fake_images, fake_labels = zip(*[data["fake"] for data in batch]) + real_images, real_labels = zip(*[data["real"] for data in batch]) + + # Stack the image, label, landmark, and mask tensors for fake and real data + fake_images = torch.stack(fake_images, dim=0) + fake_labels = torch.LongTensor(fake_labels) + real_images = torch.stack(real_images, dim=0) + real_labels = torch.LongTensor(real_labels) + + # Combine the fake and real tensors and create a dictionary of the tensors + images = torch.cat([real_images, fake_images], dim=0) + labels = torch.cat([real_labels, fake_labels], dim=0) + + data_dict = { + 'image': images, + 'label': labels, + 'landmark': None, + 'mask': None, + } + return data_dict + + def init_data_aug_method(self): + trans = A.Compose([ + A.HorizontalFlip(p=self.config['data_aug']['flip_prob']), + A.Rotate(limit=self.config['data_aug']['rotate_limit'], p=self.config['data_aug']['rotate_prob']), + A.GaussianBlur(blur_limit=self.config['data_aug']['blur_limit'], p=self.config['data_aug']['blur_prob']), + A.OneOf([ + IsotropicResize(max_side=self.config['resolution'], interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC), + IsotropicResize(max_side=self.config['resolution'], interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_LINEAR), + IsotropicResize(max_side=self.config['resolution'], interpolation_down=cv2.INTER_LINEAR, interpolation_up=cv2.INTER_LINEAR), + ], p = 0 if self.config['with_landmark'] else 1), + A.OneOf([ + A.RandomBrightnessContrast(brightness_limit=self.config['data_aug']['brightness_limit'], contrast_limit=self.config['data_aug']['contrast_limit']), + A.FancyPCA(), + A.HueSaturationValue() + ], p=0.5), + A.ImageCompression(quality_lower=self.config['data_aug']['quality_lower'], quality_upper=self.config['data_aug']['quality_upper'], p=0.5) + ], + additional_targets={'real': 'sbi'}, + ) + return trans + + +if __name__ == '__main__': + with open('/data/home/zhiyuanyan/DeepfakeBench/training/config/detector/sbi.yaml', 'r') as f: + config = yaml.safe_load(f) + train_set = SBIDataset(config=config, mode='train') + train_data_loader = \ + torch.utils.data.DataLoader( + dataset=train_set, + batch_size=config['train_batchSize'], + shuffle=True, + num_workers=0, + collate_fn=train_set.collate_fn, + ) + from tqdm import tqdm + for iteration, batch in enumerate(tqdm(train_data_loader)): + print(iteration) + if iteration > 10: + break \ No newline at end of file diff --git a/training/dataset/tall_dataset.py b/training/dataset/tall_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..3d6fe75b5941368727cfe5f4d7a649316a4fa5f1 --- /dev/null +++ b/training/dataset/tall_dataset.py @@ -0,0 +1,183 @@ +# author: Zhiyuan Yan +# email: zhiyuanyan@link.cuhk.edu.cn +# date: 2023-03-30 +# description: Abstract Base Class for all types of deepfake datasets. 
+ +import sys + +from torch import nn + +sys.path.append('.') + +import yaml +import numpy as np +from copy import deepcopy +import random +import torch +from torch.utils import data +from torchvision.utils import save_image +from training.dataset import DeepfakeAbstractBaseDataset +from einops import rearrange + +FFpp_pool = ['FaceForensics++', 'FaceShifter', 'DeepFakeDetection', 'FF-DF', 'FF-F2F', 'FF-FS', 'FF-NT'] # + + +def all_in_pool(inputs, pool): + for each in inputs: + if each not in pool: + return False + return True + + +class TALLDataset(DeepfakeAbstractBaseDataset): + def __init__(self, config=None, mode='train'): + """Initializes the dataset object. + + Args: + config (dict): A dictionary containing configuration parameters. + mode (str): A string indicating the mode (train or test). + + Raises: + NotImplementedError: If mode is not train or test. + """ + super().__init__(config, mode) + + assert self.video_level, "TALL is a videl-based method" + assert int(self.clip_size ** 0.5) ** 2 == self.clip_size, 'clip_size must be square of an integer, e.g., 4' + + def __getitem__(self, index, no_norm=False): + """ + Returns the data point at the given index. + + Args: + index (int): The index of the data point. + + Returns: + A tuple containing the image tensor, the label tensor, the landmark tensor, + and the mask tensor. + """ + # Get the image paths and label + image_paths = self.data_dict['image'][index] + label = self.data_dict['label'][index] + + if not isinstance(image_paths, list): + image_paths = [image_paths] # for the image-level IO, only one frame is used + + image_tensors = [] + landmark_tensors = [] + mask_tensors = [] + augmentation_seed = None + + for image_path in image_paths: + # Initialize a new seed for data augmentation at the start of each video + if self.video_level and image_path == image_paths[0]: + augmentation_seed = random.randint(0, 2 ** 32 - 1) + + # Get the mask and landmark paths + mask_path = image_path.replace('frames', 'masks') # Use .png for mask + landmark_path = image_path.replace('frames', 'landmarks').replace('.png', '.npy') # Use .npy for landmark + + # Load the image + try: + image = self.load_rgb(image_path) + except Exception as e: + # Skip this image and return the first one + print(f"Error loading image at index {index}: {e}") + return self.__getitem__(0) + image = np.array(image) # Convert to numpy array for data augmentation + + # Load mask and landmark (if needed) + if self.config['with_mask']: + mask = self.load_mask(mask_path) + else: + mask = None + if self.config['with_landmark']: + landmarks = self.load_landmark(landmark_path) + else: + landmarks = None + + # Do Data Augmentation + if self.mode == 'train' and self.config['use_data_augmentation']: + image_trans, landmarks_trans, mask_trans = self.data_aug(image, landmarks, mask, augmentation_seed) + else: + image_trans, landmarks_trans, mask_trans = deepcopy(image), deepcopy(landmarks), deepcopy(mask) + + # To tensor and normalize + if not no_norm: + image_trans = self.normalize(self.to_tensor(image_trans)) + if self.config['with_landmark']: + landmarks_trans = torch.from_numpy(landmarks) + if self.config['with_mask']: + mask_trans = torch.from_numpy(mask_trans) + + image_tensors.append(image_trans) + landmark_tensors.append(landmarks_trans) + mask_tensors.append(mask_trans) + + if self.video_level: + + # Stack image tensors along a new dimension (time) + image_tensors = torch.stack(image_tensors, dim=0) + + # cut out 16x16 patch + F, C, H, W = image_tensors.shape + x, y = 
np.random.randint(W), np.random.randint(H) + x1 = np.clip(x - self.config['mask_grid_size'] // 2, 0, W) + x2 = np.clip(x + self.config['mask_grid_size'] // 2, 0, W) + y1 = np.clip(y - self.config['mask_grid_size'] // 2, 0, H) + y2 = np.clip(y + self.config['mask_grid_size'] // 2, 0, H) + image_tensors[:, :, y1:y2, x1:x2] = -1 + + # # concatenate sub-image and reszie to 224x224 + # image_tensors = image_tensors.reshape(-1, H, W) + # image_tensors = rearrange(image_tensors, '(rh rw c) h w -> c (rh h) (rw w)', rh=2, c=C) + # image_tensors = nn.functional.interpolate(image_tensors.unsqueeze(0), + # size=(self.config['resolution'], self.config['resolution']), + # mode='bilinear', align_corners=False).squeeze(0) + # Stack landmark and mask tensors along a new dimension (time) + if not any(landmark is None or (isinstance(landmark, list) and None in landmark) for landmark in + landmark_tensors): + landmark_tensors = torch.stack(landmark_tensors, dim=0) + if not any(m is None or (isinstance(m, list) and None in m) for m in mask_tensors): + mask_tensors = torch.stack(mask_tensors, dim=0) + else: + # Get the first image tensor + image_tensors = image_tensors[0] + # Get the first landmark and mask tensors + if not any(landmark is None or (isinstance(landmark, list) and None in landmark) for landmark in + landmark_tensors): + landmark_tensors = landmark_tensors[0] + if not any(m is None or (isinstance(m, list) and None in m) for m in mask_tensors): + mask_tensors = mask_tensors[0] + + return image_tensors, label, landmark_tensors, mask_tensors + + +if __name__ == "__main__": + with open('training/config/detector/tall.yaml', 'r') as f: + config = yaml.safe_load(f) + train_set = TALLDataset( + config=config, + mode='train', + ) + train_data_loader = \ + torch.utils.data.DataLoader( + dataset=train_set, + batch_size=config['train_batchSize'], + shuffle=True, + num_workers=0, + collate_fn=train_set.collate_fn, + ) + from tqdm import tqdm + + for iteration, batch in enumerate(tqdm(train_data_loader)): + print(batch['image'].shape) + print(batch['label']) + b, f, c, h, w = batch['image'].shape + for i in range(f): + img_tensor = batch['image'][0][i] + img_tensor = img_tensor * torch.tensor([0.5, 0.5, 0.5]).reshape(-1, 1, 1) + torch.tensor( + [0.5, 0.5, 0.5]).reshape(-1, 1, 1) + save_image(img_tensor, f'{i}.png') + + break diff --git a/training/dataset/utils/DeepFakeMask.py b/training/dataset/utils/DeepFakeMask.py new file mode 100644 index 0000000000000000000000000000000000000000..8aad1b9b9a37904019179b42a3815be100a63ebc --- /dev/null +++ b/training/dataset/utils/DeepFakeMask.py @@ -0,0 +1,402 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- +# Created by: algohunt +# Microsoft Research & Peking University +# lilingzhi@pku.edu.cn +# Copyright (c) 2019 + +#!/usr/bin/env python3 +""" Masks functions for faceswap.py """ + +import inspect +import logging +import sys + +import cv2 +import numpy as np +import random +from math import ceil, floor +logger = logging.getLogger(__name__) # pylint: disable=invalid-name + +def landmarks_to_bbox(landmarks: np.ndarray) -> np.ndarray: + if not isinstance(landmarks, np.ndarray): + landmarks = np.array(landmarks) + assert landmarks.shape[1] == 2 + x0, y0 = np.min(landmarks, axis=0) # x和y轴上分别的最小值, [264,97] + x1, y1 = np.max(landmarks, axis=0) # x和y轴上分别的最小值, [370,236] + bbox = np.array([x0, y0, x1, y1]) + return bbox + +def mask_from_points(image: np.ndarray, points: np.ndarray) -> np.ndarray: + """8 (or omitted) - 8-connected line. + 4 - 4-connected line. 
+ LINE_AA - antialiased line.""" + h, w = image.shape[:2] + points = points.astype(int) + assert points.shape[1] == 2, f"points.shape: {points.shape}" + out = np.zeros((h, w), dtype=np.uint8) + hull = cv2.convexHull(points.astype(int)) + cv2.fillConvexPoly(out, hull, 255, lineType=4) # cv2.LINE_AA + return out + +def get_available_masks(): + """ Return a list of the available masks for cli """ + masks = sorted([name for name, obj in inspect.getmembers(sys.modules[__name__]) + if inspect.isclass(obj) and name != "Mask"]) + masks.append("none") + # logger.debug(masks) + return masks + +def landmarks_68_symmetries(): + # 68 landmarks symmetry + # + sym_ids = [9, 58, 67, 63, 52, 34, 31, 30, 29, 28] + sym = { + 1: 17, + 2: 16, + 3: 15, + 4: 14, + 5: 13, + 6: 12, + 7: 11, + 8: 10, + # + 51: 53, + 50: 54, + 49: 55, + 60: 56, + 59: 57, + # + 62: 64, + 61: 65, + 68: 66, + # + 33: 35, + 32: 36, + # + 37: 46, + 38: 45, + 39: 44, + 40: 43, + 41: 48, + 42: 47, + # + 18: 27, + 19: 26, + 20: 25, + 21: 24, + 22: 23, + # + # id + 9: 9, + 58: 58, + 67: 67, + 63: 63, + 52: 52, + 34: 34, + 31: 31, + 30: 30, + 29: 29, + 28: 28, + } + return sym, sym_ids + + + +def get_default_mask(): + """ Set the default mask for cli """ + masks = get_available_masks() + default = "dfl_full" + default = default if default in masks else masks[0] + # logger.debug(default) + return default + + +class Mask(): + """ Parent class for masks + the output mask will be .mask + channels: 1, 3 or 4: + 1 - Returns a single channel mask + 3 - Returns a 3 channel mask + 4 - Returns the original image with the mask in the alpha channel """ + + def __init__(self, landmarks, face, channels=4, idx = 0): + # logger.info("Initializing %s: (face_shape: %s, channels: %s, landmarks: %s)", + # self.__class__.__name__, face.shape, channels, landmarks) + self.landmarks = landmarks + self.face = face + self.channels = channels + self.cols = 4 # grid mask + self.rows = 4 # grid mask + self.idx = idx # grid mask + + mask = self.build_mask() + self.mask = self.merge_mask(mask) + # logger.info("Initialized %s", self.__class__.__name__) + + def build_mask(self): + """ Override to build the mask """ + raise NotImplementedError + + def merge_mask(self, mask): + """ Return the mask in requested shape """ + # logger.info("mask_shape: %s", mask.shape) + assert self.channels in (1, 3, 4), "Channels should be 1, 3 or 4" + assert mask.shape[2] == 1 and mask.ndim == 3, "Input mask be 3 dimensions with 1 channel" + + if self.channels == 3: + retval = np.tile(mask, 3) + elif self.channels == 4: + retval = np.concatenate((self.face, mask), -1) + else: + retval = mask + + # logger.info("Final mask shape: %s", retval.shape) + return retval + + +class dfl_full(Mask): # pylint: disable=invalid-name + """ DFL facial mask """ + def build_mask(self): + mask = np.zeros(self.face.shape[0:2] + (1, ), dtype=np.float32) + + nose_ridge = (self.landmarks[27:31], self.landmarks[33:34]) + jaw = (self.landmarks[0:17], + self.landmarks[48:68], + self.landmarks[0:1], + self.landmarks[8:9], + self.landmarks[16:17]) + eyes = (self.landmarks[17:27], + self.landmarks[0:1], + self.landmarks[27:28], + self.landmarks[16:17], + self.landmarks[33:34]) + parts = [jaw, nose_ridge, eyes] + + for item in parts: + merged = np.concatenate(item) + cv2.fillConvexPoly(mask, cv2.convexHull(merged), 255.) 
# pylint: disable=no-member + return mask + + +class components(Mask): # pylint: disable=invalid-name + """ Component model mask """ + def build_mask(self): + mask = np.zeros(self.face.shape[0:2] + (1, ), dtype=np.float32) + + r_jaw = (self.landmarks[0:9], self.landmarks[17:18]) + l_jaw = (self.landmarks[8:17], self.landmarks[26:27]) + r_cheek = (self.landmarks[17:20], self.landmarks[8:9]) + l_cheek = (self.landmarks[24:27], self.landmarks[8:9]) + nose_ridge = (self.landmarks[19:25], self.landmarks[8:9],) + r_eye = (self.landmarks[17:22], + self.landmarks[27:28], + self.landmarks[31:36], + self.landmarks[8:9]) + l_eye = (self.landmarks[22:27], + self.landmarks[27:28], + self.landmarks[31:36], + self.landmarks[8:9]) + nose = (self.landmarks[27:31], self.landmarks[31:36]) + parts = [r_jaw, l_jaw, r_cheek, l_cheek, nose_ridge, r_eye, l_eye, nose] + + # ---change 0531 random select parts --- + # r_face = (self.landmarks[0:9], self.landmarks[17:18],self.landmarks[17:20], self.landmarks[8:9]) + # l_face = (self.landmarks[8:17], self.landmarks[26:27],self.landmarks[24:27], self.landmarks[8:9]) + # nose_final = (self.landmarks[19:25], self.landmarks[8:9],self.landmarks[27:31], self.landmarks[31:36]) + # parts = [r_face,l_face,nose_final,r_eye,l_eye] + # num_to_select = random.randint(1, len(parts)) + # parts = random.sample(parts, num_to_select) + # print(len(parts), parts[0]) + # ---change 0531 random select parts --- + + for item in parts: + merged = np.concatenate(item) + cv2.fillConvexPoly(mask, cv2.convexHull(merged), 255.) # pylint: disable=no-member + return mask + + +class extended(Mask): # pylint: disable=invalid-name + """ Extended mask + Based on components mask. Attempts to extend the eyebrow points up the forehead + """ + def build_mask(self): + mask = np.zeros(self.face.shape[0:2] + (1, ), dtype=np.float32) + + landmarks = self.landmarks.copy() + # mid points between the side of face and eye point + ml_pnt = (landmarks[36] + landmarks[0]) // 2 + mr_pnt = (landmarks[16] + landmarks[45]) // 2 + + # mid points between the mid points and eye + ql_pnt = (landmarks[36] + ml_pnt) // 2 + qr_pnt = (landmarks[45] + mr_pnt) // 2 + + # Top of the eye arrays + bot_l = np.array((ql_pnt, landmarks[36], landmarks[37], landmarks[38], landmarks[39])) + bot_r = np.array((landmarks[42], landmarks[43], landmarks[44], landmarks[45], qr_pnt)) + + # Eyebrow arrays + top_l = landmarks[17:22] + top_r = landmarks[22:27] + + # Adjust eyebrow arrays + landmarks[17:22] = top_l + ((top_l - bot_l) // 2) + landmarks[22:27] = top_r + ((top_r - bot_r) // 2) + + r_jaw = (landmarks[0:9], landmarks[17:18]) + l_jaw = (landmarks[8:17], landmarks[26:27]) + r_cheek = (landmarks[17:20], landmarks[8:9]) + l_cheek = (landmarks[24:27], landmarks[8:9]) + nose_ridge = (landmarks[19:25], landmarks[8:9],) + r_eye = (landmarks[17:22], landmarks[27:28], landmarks[31:36], landmarks[8:9]) + l_eye = (landmarks[22:27], landmarks[27:28], landmarks[31:36], landmarks[8:9]) + nose = (landmarks[27:31], landmarks[31:36]) + parts = [r_jaw, l_jaw, r_cheek, l_cheek, nose_ridge, r_eye, l_eye, nose] + + for item in parts: + merged = np.concatenate(item) + cv2.fillConvexPoly(mask, cv2.convexHull(merged), 255.) 
# pylint: disable=no-member + return mask + + +class facehull(Mask): # pylint: disable=invalid-name + """ Basic face hull mask """ + def build_mask(self): + mask = np.zeros(self.face.shape[0:2] + (1, ), dtype=np.float32) + hull = cv2.convexHull( # pylint: disable=no-member + np.array(self.landmarks).reshape((-1, 2))) + cv2.fillConvexPoly(mask, hull, 255.0, lineType=cv2.LINE_AA) # pylint: disable=no-member + return mask + # mask = np.zeros(img.shape[0:2] + (1, ), dtype=np.float32) + # hull = cv2.convexHull(np.array(landmark).reshape((-1, 2))) + +class facehull2(Mask): # pylint: disable=invalid-name + """ Basic face hull mask """ + def build_mask(self): + mask = np.zeros(self.face.shape[0:2] + (1, ), dtype=np.uint8) + hull = cv2.convexHull( # pylint: disable=no-member + np.array(self.landmarks).reshape((-1, 2))) + cv2.fillConvexPoly(mask, hull, 1.0, lineType=cv2.LINE_AA) + return mask + + + +class gridMasking(Mask): + + def build_mask(self): + h, w = self.face.shape[:2] + landmarks = self.landmarks[:68] + # if idx is None: + # idx = np.random.randint(0, self.total) + r, c = divmod(self.idx, self.cols) # 获得除数和余数,即这个idx对应第r行第c列 + + # pixel related + xmin, ymin, xmax, ymax = landmarks_to_bbox(landmarks) + dx = ceil((xmax - xmin) / self.cols) + dy = ceil((ymax - ymin) / self.rows) + + mask = np.zeros((h, w), dtype=np.uint8) + + # fill the cell mask + x0, y0 = floor(xmin + dx * c), floor(ymin + dy * r) + x1, y1 = floor(x0 + dx), floor(y0 + dy) + cv2.rectangle(mask, (x0, y0), (x1, y1), 255, -1) + + # merge the cell mask with the convex hull + ch = mask_from_points(self.face, landmarks) + # ch = cv2.cvtColor(ch, cv2.COLOR_BGR2GRAY) + # mask = (mask & ch) / 255.0 + mask = cv2.bitwise_and(mask, mask, mask=ch) + mask = mask.reshape([mask.shape[0],mask.shape[1], 1]) + # cv2.bitwise_or(img, d_3c_i) + + return mask + +class MeshgridMasking(Mask): + areas = [ + [1, 2, 3, 4, 5, 6, 7, 49, 32, 40, 41, 42, 37, 18], + [37, 38, 39, 40, 41, 42], # left eye + [18, 19, 20, 21, 22, 28, 40, 39, 38, 37], + [28, 29, 30, 31, 32, 40], + ] + areas_asym = [ + [20, 21, 22, 28, 23, 24, 25], # old [22, 23, 28], + [31, 32, 33, 34, 35, 36], + [32, 33, 34, 35, 36, 55, 54, 53, 52, 51, 50, 49], + [49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60], + [7, 8, 9, 10, 11, 55, 56, 57, 58, 59, 60, 49], + ] + + def init(self, **kwargs): + # super().__init__(**kwargs) + + sym, _ = landmarks_68_symmetries() + # construct list of points paths + paths = [] + paths += self.areas_asym # asymmetrical areas + paths += self.areas # left + paths += [[sym[ld68_id] for ld68_id in area] for area in self.areas] # right + assert len(paths) == self.total + self.paths = paths + + @property + def total(self) -> int: + total = len(self.areas_asym) + len(self.areas) * 2 + return total + + def transform_landmarks(self, landmarks): + """Transform landmarks to extend the eyebrow points up the forehead""" + new_landmarks = landmarks.copy() + # mid points between the side of face and eye point + ml_pnt = (new_landmarks[36] + new_landmarks[0]) // 2 + mr_pnt = (new_landmarks[16] + new_landmarks[45]) // 2 + + # mid points between the mid points and eye + ql_pnt = (new_landmarks[36] + ml_pnt) // 2 + qr_pnt = (new_landmarks[45] + mr_pnt) // 2 + + # Top of the eye arrays + bot_l = np.array( + ( + ql_pnt, + new_landmarks[36], + new_landmarks[37], + new_landmarks[38], + new_landmarks[39], + ) + ) + bot_r = np.array( + ( + new_landmarks[42], + new_landmarks[43], + new_landmarks[44], + new_landmarks[45], + qr_pnt, + ) + ) + + # Eyebrow arrays + top_l = new_landmarks[17:22] 
+ top_r = new_landmarks[22:27] + + # Adjust eyebrow arrays + new_landmarks[17:22] = top_l + ((top_l - bot_l) // 2) + new_landmarks[22:27] = top_r + ((top_r - bot_r) // 2) + + return new_landmarks + + def build_mask(self) -> np.ndarray: + self.init() + h, w = self.face.shape[:2] + + path = self.paths[self.idx] + new_landmarks = self.transform_landmarks(self.landmarks) + points = [new_landmarks[ld68_id - 1] for ld68_id in path] + points = np.array(points, dtype=np.int32) + + # cv2.fillConvexPoly(out, points, 255, lineType=4) + mask = np.zeros((h, w), dtype=np.uint8) + cv2.fillPoly(mask, [points], 255) + mask = mask.reshape([mask.shape[0],mask.shape[1], 1]) + return mask \ No newline at end of file diff --git a/training/dataset/utils/SLADD.py b/training/dataset/utils/SLADD.py new file mode 100644 index 0000000000000000000000000000000000000000..5bce890e2ccedb53a3d781fe37beed4a54381448 --- /dev/null +++ b/training/dataset/utils/SLADD.py @@ -0,0 +1,163 @@ +from enum import Enum +from functools import reduce + +import cv2 +import numpy as np +from scipy.ndimage import binary_dilation + +from .DeepFakeMask import Mask + + +def dist(a, b): + x1, y1 = a + x2, y2 = b + return np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2) + # return np.linalg.norm(a-b) + + +def get_five_key(landmarks_68): + # get the five key points by using the landmarks + leye_center = (landmarks_68[36] + landmarks_68[39]) * 0.5 + reye_center = (landmarks_68[42] + landmarks_68[45]) * 0.5 + nose = landmarks_68[33] + lmouth = landmarks_68[48] + rmouth = landmarks_68[54] + leye_left = landmarks_68[36] + leye_right = landmarks_68[39] + reye_left = landmarks_68[42] + reye_right = landmarks_68[45] + out = [ + tuple(x.astype("int32")) + for x in [ + leye_center, + reye_center, + nose, + lmouth, + rmouth, + leye_left, + leye_right, + reye_left, + reye_right, + ] + ] + return out + + +def remove_eyes(image, landmarks, opt): + ##l: left eye; r: right eye, b: both eye + if opt == "l": + (x1, y1), (x2, y2) = landmarks[5:7] + elif opt == "r": + (x1, y1), (x2, y2) = landmarks[7:9] + elif opt == "b": + (x1, y1), (x2, y2) = landmarks[:2] + else: + print("wrong region") + mask = np.zeros_like(image[..., 0]) + line = cv2.line(mask, (x1, y1), (x2, y2), color=(1), thickness=2) + w = dist((x1, y1), (x2, y2)) + dilation = int(w // 4) + if opt != "b": + dilation *= 4 + line = binary_dilation(line, iterations=dilation) + return line + + +def remove_nose(image, landmarks): + (x1, y1), (x2, y2) = landmarks[:2] + x3, y3 = landmarks[2] + mask = np.zeros_like(image[..., 0]) + x4 = int((x1 + x2) / 2) + y4 = int((y1 + y2) / 2) + line = cv2.line(mask, (x3, y3), (x4, y4), color=(1), thickness=2) + w = dist((x1, y1), (x2, y2)) + dilation = int(w // 4) + line = binary_dilation(line, iterations=dilation) + return line + + +def remove_mouth(image, landmarks): + (x1, y1), (x2, y2) = landmarks[3:5] + mask = np.zeros_like(image[..., 0]) + line = cv2.line(mask, (x1, y1), (x2, y2), color=(1), thickness=2) + w = dist((x1, y1), (x2, y2)) + dilation = int(w // 3) + line = binary_dilation(line, iterations=dilation) + return line + + +class SladdRegion(Enum): + left_eye = 0 + right_eye = 1 + nose = 2 + mouth = 3 + # composition + both_eyes = left_eye + right_eye # 4 + + +class SladdMasking(Mask): + + # [0, 1, 2, 3, (0, 1), (0, 2), (1, 2), (2, 3), (0, 1, 2), (0, 1, 2, 3)] + # left-eye, right-eye, nose, mouth, ... 
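+    # Entries 0-3 above are the single facial regions enumerated in SladdRegion;
+    # the tuple entries are unions of those regions, which the REGIONS list below
+    # spells out explicitly as lists of SladdRegion members.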
+ ALL_REGIONS = [ + SladdRegion.left_eye, + SladdRegion.right_eye, + SladdRegion.nose, + SladdRegion.mouth, + ] + REGIONS = [ + [SladdRegion.left_eye], + [SladdRegion.right_eye], + [SladdRegion.nose], + [SladdRegion.mouth], + [SladdRegion.left_eye, SladdRegion.right_eye], + [SladdRegion.left_eye, SladdRegion.nose], + [SladdRegion.right_eye, SladdRegion.nose], + [SladdRegion.nose, SladdRegion.mouth], + [SladdRegion.left_eye, SladdRegion.right_eye, SladdRegion.nose], + ALL_REGIONS, + ] + + def init(self, compose: bool = False, single: bool = True, **kwargs): + # super().__init__(**kwargs) + self.compose = compose + if compose: + self.regions = SladdMasking.REGIONS + else: + self.regions = [reg for reg in SladdMasking.REGIONS if len(reg) == 1] + if single: + self.regions = [self.ALL_REGIONS] + + @property + def total(self) -> int: + return len(self.regions) + + @staticmethod + def parse(img, reg, landmarks) -> np.ndarray: + five_key = get_five_key(landmarks) + if reg is SladdRegion.left_eye: + mask = remove_eyes(img, five_key, "l") + elif reg is SladdRegion.right_eye: + mask = remove_eyes(img, five_key, "r") + elif reg is SladdRegion.nose: + mask = remove_nose(img, five_key) + elif reg is SladdRegion.mouth: + mask = remove_mouth(img, five_key) + else: + raise ValueError("Invalid region") + # elif reg == SladdRegion4: + # mask = remove_eyes(img, five_key, "b") + return mask + + def build_mask(self) -> np.ndarray: + self.init() + h, w = self.face.shape[:2] + # print(len(self.regions)) + regs = [self.regions[0][self.idx]] + # if isinstance(reg, int): + # mask = parse(img, reg, landmarks) + masks = [SladdMasking.parse(self.face, reg, self.landmarks) for reg in regs] + mask = reduce(np.maximum, masks) + mask = mask.reshape([mask.shape[0],mask.shape[1], 1]) + + return mask diff --git a/training/dataset/utils/attribution_mask.py b/training/dataset/utils/attribution_mask.py new file mode 100644 index 0000000000000000000000000000000000000000..6c0f9b06f7421fe94e2eae3d2eb44bc493237562 --- /dev/null +++ b/training/dataset/utils/attribution_mask.py @@ -0,0 +1,55 @@ + + +import cv2 +import math +import numpy as np +from scipy.ndimage import binary_erosion, binary_dilation +def dist(p1, p2): + return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) + +def remove_mouth(image, landmarks): + (x1, y1), (x2, y2) = landmarks[3:5] + mask = np.zeros_like(image[..., 0]) + line = cv2.line(mask, (x1, y1), (x2, y2), color=(1), thickness=2) + w = dist((x1, y1), (x2, y2)) + dilation = int(w // 3) + line = binary_dilation(line, iterations=dilation) + return line + +def remove_eyes(image, landmarks, opt='b'): + ##l: left eye; r: right eye, b: both eye + if opt == 'l': + (x1, y1), (x2, y2) = landmarks[36],landmarks[39] + elif opt == 'r': + (x1, y1), (x2, y2) = landmarks[42],landmarks[46] + elif opt == 'b': + (x1, y1), (x2, y2) = landmarks[36],landmarks[46] + else: + print('wrong region') + mask = np.zeros_like(image[..., 0]) + line = cv2.line(np.array(mask, dtype=np.uint8), (int(x1), int(y1)), (int(x2), int(y2)), color=(1), thickness=2) + w = dist((x1, y1), (x2, y2)) + dilation = int(w // 4) + if opt != 'b': + dilation *= 4 + line = binary_dilation(line, iterations=dilation) + return line + +def remove_nose(image, landmarks): + ##l: left eye; r: right eye, b: both eye + + (x1, y1), (x2, y2) = landmarks[27], landmarks[30] + mask = np.zeros_like(image[..., 0]) + line = cv2.line(np.array(mask, dtype=np.uint8), (int(x1), int(y1)), (int(x2), int(y2)), color=(1), thickness=2) + w = dist((x1, y1), (x2, y2)) + dilation = 
int(w // 3) + line1 = binary_dilation(line, iterations=dilation) + + (x1, y1), (x2, y2) = landmarks[31], landmarks[35] + mask = np.zeros_like(image[..., 0]) + line = cv2.line(np.array(mask, dtype=np.uint8), (int(x1), int(y1)), (int(x2), int(y2)), color=(1), thickness=2) + w = dist((x1, y1), (x2, y2)) + dilation = int(w //4 ) + line2 = binary_dilation(line, iterations=dilation) + + return line1+line2 \ No newline at end of file diff --git a/training/dataset/utils/bi_online_generation.py b/training/dataset/utils/bi_online_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..4386a290f0bb4f55e7ce01177709217b612bb11c --- /dev/null +++ b/training/dataset/utils/bi_online_generation.py @@ -0,0 +1,289 @@ +import dlib +from skimage import io +from skimage import transform as sktransform +import numpy as np +from matplotlib import pyplot as plt +import json +import os +import random +from PIL import Image +from imgaug import augmenters as iaa +from dataset.library.DeepFakeMask import dfl_full,facehull,components,extended +from dataset.utils.attribution_mask import * +import cv2 +import tqdm + +''' +from PIL import ImageDraw +# 创建一个可以在图像上绘制的对象 +img_pil=Image.fromarray(img) +draw = ImageDraw.Draw(img_pil) + +# 在图像上绘制点 +for i, point in enumerate(landmark): + x, y = point + radius = 1 # 点的半径 + draw.ellipse((x-radius, y-radius, x+radius, y+radius), fill="red") + draw.text((x+radius+2, y-radius), str(i), fill="black") # 在点旁边添加标签 +img_pil.show() +''' + + +def name_resolve(path): + name = os.path.splitext(os.path.basename(path))[0] + vid_id, frame_id = name.split('_')[0:2] + return vid_id, frame_id + +def total_euclidean_distance(a,b): + assert len(a.shape) == 2 + return np.sum(np.linalg.norm(a-b,axis=1)) + +def get_five_key(landmarks_68): + # get the five key points by using the landmarks + leye_center = (landmarks_68[36] + landmarks_68[39])*0.5 + reye_center = (landmarks_68[42] + landmarks_68[45])*0.5 + nose = landmarks_68[33] + lmouth = landmarks_68[48] + rmouth = landmarks_68[54] + leye_left = landmarks_68[36] + leye_right = landmarks_68[39] + reye_left = landmarks_68[42] + reye_right = landmarks_68[45] + out = [ tuple(x.astype('int32')) for x in [ + leye_center,reye_center,nose,lmouth,rmouth,leye_left,leye_right,reye_left,reye_right + ]] + return out + +def random_get_hull(landmark,img1,hull_type=None): + if hull_type==None: + hull_type = random.choice([0,1,2,3]) + if hull_type == 0: + mask = dfl_full(landmarks=landmark.astype('int32'),face=img1, channels=3).mask + return mask[:,:,0]/255 + elif hull_type == 1: + mask = extended(landmarks=landmark.astype('int32'),face=img1, channels=3).mask + return mask[:,:,0]/255 + elif hull_type == 2: + mask = components(landmarks=landmark.astype('int32'),face=img1, channels=3).mask + return mask[:,:,0]/255 + elif hull_type == 3: + mask = facehull(landmarks=landmark.astype('int32'),face=img1, channels=3).mask + return mask[:,:,0]/255 + elif hull_type == 4: + mask = remove_mouth(img1,get_five_key(landmark)) + return mask.astype(np.float32) + elif hull_type == 5: + mask = remove_eyes(img1,landmark) + return mask.astype(np.float32) + elif hull_type == 6: + mask = remove_nose(img1,landmark) + return mask.astype(np.float32) + elif hull_type == 7: + mask = remove_nose(img1,landmark) + remove_eyes(img1,landmark) + remove_mouth(img1,get_five_key(landmark)) + return mask.astype(np.float32) + + +def random_erode_dilate(mask, ksize=None): + if random.random()>0.5: + if ksize is None: + ksize = random.randint(1,21) + if ksize % 2 == 0: + ksize += 1 + 
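+        # cast the (possibly float) mask to uint8, scale it to {0, 255}, erode it
+        # with a random odd-sized square kernel, then rescale back to [0, 1]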
mask = np.array(mask).astype(np.uint8)*255 + kernel = np.ones((ksize,ksize),np.uint8) + mask = cv2.erode(mask,kernel,1)/255 + else: + if ksize is None: + ksize = random.randint(1,5) + if ksize % 2 == 0: + ksize += 1 + mask = np.array(mask).astype(np.uint8)*255 + kernel = np.ones((ksize,ksize),np.uint8) + mask = cv2.dilate(mask,kernel,1)/255 + return mask + + +# borrow from https://github.com/MarekKowalski/FaceSwap +def blendImages(src, dst, mask, featherAmount=0.2): + + maskIndices = np.where(mask != 0) + + src_mask = np.ones_like(mask) + dst_mask = np.zeros_like(mask) + + maskPts = np.hstack((maskIndices[1][:, np.newaxis], maskIndices[0][:, np.newaxis])) + faceSize = np.max(maskPts, axis=0) - np.min(maskPts, axis=0) + featherAmount = featherAmount * np.max(faceSize) + + hull = cv2.convexHull(maskPts) + dists = np.zeros(maskPts.shape[0]) + for i in range(maskPts.shape[0]): + dists[i] = cv2.pointPolygonTest(hull, (maskPts[i, 0], maskPts[i, 1]), True) + + weights = np.clip(dists / featherAmount, 0, 1) + + composedImg = np.copy(dst) + composedImg[maskIndices[0], maskIndices[1]] = weights[:, np.newaxis] * src[maskIndices[0], maskIndices[1]] + (1 - weights[:, np.newaxis]) * dst[maskIndices[0], maskIndices[1]] + + composedMask = np.copy(dst_mask) + composedMask[maskIndices[0], maskIndices[1]] = weights[:, np.newaxis] * src_mask[maskIndices[0], maskIndices[1]] + ( + 1 - weights[:, np.newaxis]) * dst_mask[maskIndices[0], maskIndices[1]] + + return composedImg, composedMask + + +# borrow from https://github.com/MarekKowalski/FaceSwap +def colorTransfer(src, dst, mask): + transferredDst = np.copy(dst) + + maskIndices = np.where(mask != 0) + + + maskedSrc = src[maskIndices[0], maskIndices[1]].astype(np.int32) + maskedDst = dst[maskIndices[0], maskIndices[1]].astype(np.int32) + + meanSrc = np.mean(maskedSrc, axis=0) + meanDst = np.mean(maskedDst, axis=0) + + maskedDst = maskedDst - meanDst + maskedDst = maskedDst + meanSrc + maskedDst = np.clip(maskedDst, 0, 255) + + transferredDst[maskIndices[0], maskIndices[1]] = maskedDst + + return transferredDst + +class BIOnlineGeneration(): + def __init__(self): + with open('precomuted_landmarks.json', 'r') as f: + self.landmarks_record = json.load(f) + for k,v in self.landmarks_record.items(): + self.landmarks_record[k] = np.array(v) + # extract all frame from all video in the name of {videoid}_{frameid} + self.data_list = [ + '000_0000.png', + '001_0000.png' + ] * 10000 + + # predefine mask distortion + self.distortion = iaa.Sequential([iaa.PiecewiseAffine(scale=(0.01, 0.15))]) + + def gen_one_datapoint(self): + background_face_path = random.choice(self.data_list) + data_type = 'real' if random.randint(0,1) else 'fake' + if data_type == 'fake' : + face_img,mask = self.get_blended_face(background_face_path) + mask = ( 1 - mask ) * mask * 4 + else: + face_img = io.imread(background_face_path) + mask = np.zeros((317, 317, 1)) + + # randomly downsample after BI pipeline + if random.randint(0,1): + aug_size = random.randint(64, 317) + face_img = Image.fromarray(face_img) + if random.randint(0,1): + face_img = face_img.resize((aug_size, aug_size), Image.BILINEAR) + else: + face_img = face_img.resize((aug_size, aug_size), Image.NEAREST) + face_img = face_img.resize((317, 317),Image.BILINEAR) + face_img = np.array(face_img) + + # random jpeg compression after BI pipeline + if random.randint(0,1): + quality = random.randint(60, 100) + encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality] + face_img_encode = cv2.imencode('.jpg', face_img, encode_param)[1] + face_img 
= cv2.imdecode(face_img_encode, cv2.IMREAD_COLOR) + + face_img = face_img[60:317,30:287,:] + mask = mask[60:317,30:287,:] + + # random flip + if random.randint(0,1): + face_img = np.flip(face_img,1) + mask = np.flip(mask,1) + + return face_img,mask,data_type + + def get_blended_face(self,background_face_path): + background_face = io.imread(background_face_path) + background_landmark = self.landmarks_record[background_face_path] + + foreground_face_path = self.search_similar_face(background_landmark,background_face_path) + foreground_face = io.imread(foreground_face_path) + + # down sample before blending + aug_size = random.randint(128,317) + background_landmark = background_landmark * (aug_size/317) + foreground_face = sktransform.resize(foreground_face,(aug_size,aug_size),preserve_range=True).astype(np.uint8) + background_face = sktransform.resize(background_face,(aug_size,aug_size),preserve_range=True).astype(np.uint8) + + # get random type of initial blending mask + mask = random_get_hull(background_landmark, background_face) + + # random deform mask + mask = self.distortion.augment_image(mask) + mask = random_erode_dilate(mask) + + # filte empty mask after deformation + if np.sum(mask) == 0 : + raise NotImplementedError + + # apply color transfer + foreground_face = colorTransfer(background_face, foreground_face, mask*255) + + # blend two face + blended_face, mask = blendImages(foreground_face, background_face, mask*255) + blended_face = blended_face.astype(np.uint8) + + # resize back to default resolution + blended_face = sktransform.resize(blended_face,(317,317),preserve_range=True).astype(np.uint8) + mask = sktransform.resize(mask,(317,317),preserve_range=True) + mask = mask[:,:,0:1] + return blended_face,mask + + def search_similar_face(self,this_landmark,background_face_path): + vid_id, frame_id = name_resolve(background_face_path) + min_dist = 99999999 + + # random sample 5000 frame from all frams: + all_candidate_path = random.sample( self.data_list, k=5000) + + # filter all frame that comes from the same video as background face + all_candidate_path = filter(lambda k:name_resolve(k)[0] != vid_id, all_candidate_path) + all_candidate_path = list(all_candidate_path) + + # loop throungh all candidates frame to get best match + for candidate_path in all_candidate_path: + candidate_landmark = self.landmarks_record[candidate_path].astype(np.float32) + candidate_distance = total_euclidean_distance(candidate_landmark, this_landmark) + if candidate_distance < min_dist: + min_dist = candidate_distance + min_path = candidate_path + + return min_path + +if __name__ == '__main__': + ds = BIOnlineGeneration() + from tqdm import tqdm + all_imgs = [] + for _ in tqdm(range(50)): + img,mask,label = ds.gen_one_datapoint() + mask = np.repeat(mask,3,2) + mask = (mask*255).astype(np.uint8) + img_cat = np.concatenate([img,mask],1) + all_imgs.append(img_cat) + all_in_one = Image.new('RGB', (2570,2570)) + + for x in range(5): + for y in range(10): + idx = x*10+y + im = Image.fromarray(all_imgs[idx]) + + dx = x*514 + dy = y*257 + + all_in_one.paste(im, (dx,dy)) + + all_in_one.save("all_in_one.jpg") \ No newline at end of file diff --git a/training/dataset/utils/bi_online_generation_yzy.py b/training/dataset/utils/bi_online_generation_yzy.py new file mode 100644 index 0000000000000000000000000000000000000000..1cc85db2845a9f59142df9e21b5af5a90e91950c --- /dev/null +++ b/training/dataset/utils/bi_online_generation_yzy.py @@ -0,0 +1,268 @@ +import dlib +from skimage import io +from skimage import transform 
as sktransform +import numpy as np +from matplotlib import pyplot as plt +import json +import os +import random +from PIL import Image +from imgaug import augmenters as iaa +from .DeepFakeMask import dfl_full,facehull,components,extended,gridMasking,MeshgridMasking, facehull2 +from .SLADD import SladdMasking +import cv2 +import torch +import torch.nn as nn +import tqdm +import pdb + + +def name_resolve(path): + name = os.path.splitext(os.path.basename(path))[0] + vid_id, frame_id = name.split('_')[0:2] + return vid_id, frame_id + +def total_euclidean_distance(a,b): + assert len(a.shape) == 2 + return np.sum(np.linalg.norm(a-b,axis=1)) + +def random_get_hull(landmark,img1,hull_type0, idx=0): + # print("in bi online generation----------",hull_type0) + if hull_type0 == -1: + hull_type = random.choice([0,1,2,3]) + else: + # hull_type = int(random.choice(hull_type0)) + hull_type = hull_type0 + # print(hull_type) + if hull_type == 0: + # print("here") + mask = dfl_full(landmarks=landmark.astype('int32'),face=img1, channels=3).mask + return mask/255, idx + elif hull_type == 1: + mask = extended(landmarks=landmark.astype('int32'),face=img1, channels=3).mask + return mask/255, idx + elif hull_type == 2: + mask = components(landmarks=landmark.astype('int32'),face=img1, channels=3).mask + return mask/255, idx + elif hull_type == 3: + mask = facehull(landmarks=landmark.astype('int32'),face=img1, channels=3).mask + return mask/255, idx # --change0628-- mask/255 + + # elif hull_type == 4: # SLADD + # mask = SladdMasking(landmarks=landmark.astype('int32'),face=img1, channels=3, idx=0).mask + # return mask/1., idx + # elif hull_type == 5: # SLADD + # mask = SladdMasking(landmarks=landmark.astype('int32'),face=img1, channels=3, idx=1).mask + # return mask/1., idx + # elif hull_type == 6: # SLADD + # mask = SladdMasking(landmarks=landmark.astype('int32'),face=img1, channels=3, idx=2).mask + # return mask/1., idx + elif hull_type == 6: # SLADD/mouth + mask = SladdMasking(landmarks=landmark.astype('int32'),face=img1, channels=3, idx=3).mask + return mask/1., idx + + +def random_erode_dilate(mask, ksize=None): + if random.random()>0.5: + if ksize is None: + ksize = random.randint(1,21) + if ksize % 2 == 0: + ksize += 1 + mask = np.array(mask).astype(np.uint8)*255 + kernel = np.ones((ksize,ksize),np.uint8) + mask = cv2.erode(mask,kernel,1)/255 + else: + if ksize is None: + ksize = random.randint(1,5) + if ksize % 2 == 0: + ksize += 1 + mask = np.array(mask).astype(np.uint8)*255 + kernel = np.ones((ksize,ksize),np.uint8) + mask = cv2.dilate(mask,kernel,1)/255 + return mask + + +# borrow from https://github.com/MarekKowalski/FaceSwap +def blendImages(src, dst, mask, featherAmount=0.2): + + maskIndices = np.where(mask != 0) + + src_mask = np.ones_like(mask) + dst_mask = np.zeros_like(mask) + + maskPts = np.hstack((maskIndices[1][:, np.newaxis], maskIndices[0][:, np.newaxis])) + faceSize = np.max(maskPts, axis=0) - np.min(maskPts, axis=0) + featherAmount = featherAmount * np.max(faceSize) + + hull = cv2.convexHull(maskPts) + dists = np.zeros(maskPts.shape[0]) + for i in range(maskPts.shape[0]): + dists[i] = cv2.pointPolygonTest(hull, (maskPts[i, 0], maskPts[i, 1]), True) + + weights = np.clip(dists / featherAmount, 0, 1) + + composedImg = np.copy(dst) + composedImg[maskIndices[0], maskIndices[1]] = weights[:, np.newaxis] * src[maskIndices[0], maskIndices[1]] + (1 - weights[:, np.newaxis]) * dst[maskIndices[0], maskIndices[1]] + + composedMask = np.copy(dst_mask) + composedMask[maskIndices[0], maskIndices[1]] = 
weights[:, np.newaxis] * src_mask[maskIndices[0], maskIndices[1]] + ( + 1 - weights[:, np.newaxis]) * dst_mask[maskIndices[0], maskIndices[1]] + + return composedImg, composedMask + + +# borrow from https://github.com/MarekKowalski/FaceSwap +def colorTransfer(src, dst, mask): + transferredDst = np.copy(dst) + + maskIndices = np.where(mask != 0) + + + maskedSrc = src[maskIndices[0], maskIndices[1]].astype(np.int32) + maskedDst = dst[maskIndices[0], maskIndices[1]].astype(np.int32) + + meanSrc = np.mean(maskedSrc, axis=0) + meanDst = np.mean(maskedDst, axis=0) + + maskedDst = maskedDst - meanDst + maskedDst = maskedDst + meanSrc + maskedDst = np.clip(maskedDst, 0, 255) + + transferredDst[maskIndices[0], maskIndices[1]] = maskedDst + + return transferredDst + +class BIOnlineGeneration(): + def __init__(self): + with open('precomuted_landmarks.json', 'r') as f: + self.landmarks_record = json.load(f) + for k,v in self.landmarks_record.items(): + self.landmarks_record[k] = np.array(v) + # extract all frame from all video in the name of {videoid}_{frameid} + self.data_list = [ + '000_0000.png', + '001_0000.png' + ] * 10000 + + # predefine mask distortion + self.distortion = iaa.Sequential([iaa.PiecewiseAffine(scale=(0.01, 0.15))]) + + def gen_one_datapoint(self): + background_face_path = random.choice(self.data_list) + data_type = 'real' if random.randint(0,1) else 'fake' + if data_type == 'fake' : + face_img,mask = self.get_blended_face(background_face_path) + mask = ( 1 - mask ) * mask * 4 + else: + face_img = io.imread(background_face_path) + mask = np.zeros((317, 317, 1)) + + # randomly downsample after BI pipeline + if random.randint(0,1): + aug_size = random.randint(64, 317) + face_img = Image.fromarray(face_img) + if random.randint(0,1): + face_img = face_img.resize((aug_size, aug_size), Image.BILINEAR) + else: + face_img = face_img.resize((aug_size, aug_size), Image.NEAREST) + face_img = face_img.resize((317, 317),Image.BILINEAR) + face_img = np.array(face_img) + + # random jpeg compression after BI pipeline + if random.randint(0,1): + quality = random.randint(60, 100) + encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality] + face_img_encode = cv2.imencode('.jpg', face_img, encode_param)[1] + face_img = cv2.imdecode(face_img_encode, cv2.IMREAD_COLOR) + + face_img = face_img[60:317,30:287,:] + mask = mask[60:317,30:287,:] + + # random flip + if random.randint(0,1): + face_img = np.flip(face_img,1) + mask = np.flip(mask,1) + + return face_img,mask,data_type + + def get_blended_face(self,background_face_path): + background_face = io.imread(background_face_path) + background_landmark = self.landmarks_record[background_face_path] + + foreground_face_path = self.search_similar_face(background_landmark,background_face_path) + foreground_face = io.imread(foreground_face_path) + + # down sample before blending + aug_size = random.randint(128,317) + background_landmark = background_landmark * (aug_size/317) + foreground_face = sktransform.resize(foreground_face,(aug_size,aug_size),preserve_range=True).astype(np.uint8) + background_face = sktransform.resize(background_face,(aug_size,aug_size),preserve_range=True).astype(np.uint8) + + # get random type of initial blending mask + mask, idx = random_get_hull(background_landmark, background_face) + + # random deform mask + mask = self.distortion.augment_image(mask) + mask = random_erode_dilate(mask) + + # filte empty mask after deformation + if np.sum(mask) == 0 : + raise NotImplementedError + + # apply color transfer + foreground_face = 
colorTransfer(background_face, foreground_face, mask*255) + + # blend two face + blended_face, mask = blendImages(foreground_face, background_face, mask*255) + blended_face = blended_face.astype(np.uint8) + + # resize back to default resolution + blended_face = sktransform.resize(blended_face,(317,317),preserve_range=True).astype(np.uint8) + mask = sktransform.resize(mask,(317,317),preserve_range=True) + mask = mask[:,:,0:1] + return blended_face,mask + + def search_similar_face(self,this_landmark,background_face_path): + vid_id, frame_id = name_resolve(background_face_path) + min_dist = 99999999 + + # random sample 5000 frame from all frams: + all_candidate_path = random.sample( self.data_list, k=5000) + + # filter all frame that comes from the same video as background face + all_candidate_path = filter(lambda k:name_resolve(k)[0] != vid_id, all_candidate_path) + all_candidate_path = list(all_candidate_path) + + # loop throungh all candidates frame to get best match + for candidate_path in all_candidate_path: + candidate_landmark = self.landmarks_record[candidate_path].astype(np.float32) + candidate_distance = total_euclidean_distance(candidate_landmark, this_landmark) + if candidate_distance < min_dist: + min_dist = candidate_distance + min_path = candidate_path + + return min_path + +if __name__ == '__main__': + ds = BIOnlineGeneration() + from tqdm import tqdm + all_imgs = [] + for _ in tqdm(range(50)): + img,mask,label = ds.gen_one_datapoint() + mask = np.repeat(mask,3,2) + mask = (mask*255).astype(np.uint8) + img_cat = np.concatenate([img,mask],1) + all_imgs.append(img_cat) + all_in_one = Image.new('RGB', (2570,2570)) + + for x in range(5): + for y in range(10): + idx = x*10+y + im = Image.fromarray(all_imgs[idx]) + + dx = x*514 + dy = y*257 + + all_in_one.paste(im, (dx,dy)) + + all_in_one.save("all_in_one.jpg") \ No newline at end of file diff --git a/training/dataset/utils/color_transfer.py b/training/dataset/utils/color_transfer.py new file mode 100644 index 0000000000000000000000000000000000000000..0845dbcf8221b400218411f11968eb4156233022 --- /dev/null +++ b/training/dataset/utils/color_transfer.py @@ -0,0 +1,516 @@ +import cv2 +import numpy as np +from numpy import linalg as npla + +import scipy as sp +import scipy.sparse +from scipy.sparse.linalg import spsolve + + +def color_transfer_sot(src, trg, steps=10, batch_size=5, reg_sigmaXY=16.0, reg_sigmaV=5.0): + """ + Color Transform via Sliced Optimal Transfer + ported by @iperov from https://github.com/dcoeurjo/OTColorTransfer + + src - any float range any channel image + dst - any float range any channel image, same shape as src + steps - number of solver steps + batch_size - solver batch size + reg_sigmaXY - apply regularization and sigmaXY of filter, otherwise set to 0.0 + reg_sigmaV - sigmaV of filter + + return value - clip it manually + """ + if not np.issubdtype(src.dtype, np.floating): + raise ValueError("src value must be float") + if not np.issubdtype(trg.dtype, np.floating): + raise ValueError("trg value must be float") + + if len(src.shape) != 3: + raise ValueError("src shape must have rank 3 (h,w,c)") + + if src.shape != trg.shape: + raise ValueError("src and trg shapes must be equal") + + src_dtype = src.dtype + h, w, c = src.shape + new_src = src.copy() + + for step in range(steps): + advect = np.zeros((h*w, c), dtype=src_dtype) + for batch in range(batch_size): + dir = np.random.normal(size=c).astype(src_dtype) + dir /= npla.norm(dir) + + projsource = np.sum(new_src*dir, axis=-1).reshape((h*w)) + projtarget = 
np.sum(trg*dir, axis=-1).reshape((h*w)) + + idSource = np.argsort(projsource) + idTarget = np.argsort(projtarget) + + a = projtarget[idTarget]-projsource[idSource] + for i_c in range(c): + advect[idSource, i_c] += a * dir[i_c] + new_src += advect.reshape((h, w, c)) / batch_size + + if reg_sigmaXY != 0.0: + src_diff = new_src-src + src_diff_filt = cv2.bilateralFilter( + src_diff, 0, reg_sigmaV, reg_sigmaXY) + if len(src_diff_filt.shape) == 2: + src_diff_filt = src_diff_filt[..., None] + new_src = src + src_diff_filt + return new_src + + +def color_transfer_mkl(x0, x1): + eps = np.finfo(float).eps + + h, w, c = x0.shape + h1, w1, c1 = x1.shape + + x0 = x0.reshape((h*w, c)) + x1 = x1.reshape((h1*w1, c1)) + + a = np.cov(x0.T) + b = np.cov(x1.T) + + Da2, Ua = np.linalg.eig(a) + Da = np.diag(np.sqrt(Da2.clip(eps, None))) + + C = np.dot(np.dot(np.dot(np.dot(Da, Ua.T), b), Ua), Da) + + Dc2, Uc = np.linalg.eig(C) + Dc = np.diag(np.sqrt(Dc2.clip(eps, None))) + + Da_inv = np.diag(1./(np.diag(Da))) + + t = np.dot( + np.dot(np.dot(np.dot(np.dot(np.dot(Ua, Da_inv), Uc), Dc), Uc.T), Da_inv), Ua.T) + + mx0 = np.mean(x0, axis=0) + mx1 = np.mean(x1, axis=0) + + result = np.dot(x0-mx0, t) + mx1 + return np.clip(result.reshape((h, w, c)).astype(x0.dtype), 0, 1) + + +def color_transfer_idt(i0, i1, bins=256, n_rot=20): + relaxation = 1 / n_rot + h, w, c = i0.shape + h1, w1, c1 = i1.shape + + i0 = i0.reshape((h*w, c)) + i1 = i1.reshape((h1*w1, c1)) + + n_dims = c + + d0 = i0.T + d1 = i1.T + + for i in range(n_rot): + + r = sp.stats.special_ortho_group.rvs(n_dims).astype(np.float32) + + d0r = np.dot(r, d0) + d1r = np.dot(r, d1) + d_r = np.empty_like(d0) + + for j in range(n_dims): + + lo = min(d0r[j].min(), d1r[j].min()) + hi = max(d0r[j].max(), d1r[j].max()) + + p0r, edges = np.histogram(d0r[j], bins=bins, range=[lo, hi]) + p1r, _ = np.histogram(d1r[j], bins=bins, range=[lo, hi]) + + cp0r = p0r.cumsum().astype(np.float32) + cp0r /= cp0r[-1] + + cp1r = p1r.cumsum().astype(np.float32) + cp1r /= cp1r[-1] + + f = np.interp(cp0r, cp1r, edges[1:]) + + d_r[j] = np.interp(d0r[j], edges[1:], f, left=0, right=bins) + + d0 = relaxation * np.linalg.solve(r, (d_r - d0r)) + d0 + + return np.clip(d0.T.reshape((h, w, c)).astype(i0.dtype), 0, 1) + + +def laplacian_matrix(n, m): + mat_D = scipy.sparse.lil_matrix((m, m)) + mat_D.setdiag(-1, -1) + mat_D.setdiag(4) + mat_D.setdiag(-1, 1) + mat_A = scipy.sparse.block_diag([mat_D] * n).tolil() + mat_A.setdiag(-1, 1*m) + mat_A.setdiag(-1, -1*m) + return mat_A + + +def seamless_clone(source, target, mask): + h, w, c = target.shape + result = [] + + mat_A = laplacian_matrix(h, w) + laplacian = mat_A.tocsc() + + mask[0, :] = 1 + mask[-1, :] = 1 + mask[:, 0] = 1 + mask[:, -1] = 1 + q = np.argwhere(mask == 0) + + k = q[:, 1]+q[:, 0]*w + mat_A[k, k] = 1 + mat_A[k, k + 1] = 0 + mat_A[k, k - 1] = 0 + mat_A[k, k + w] = 0 + mat_A[k, k - w] = 0 + + mat_A = mat_A.tocsc() + mask_flat = mask.flatten() + for channel in range(c): + + source_flat = source[:, :, channel].flatten() + target_flat = target[:, :, channel].flatten() + + mat_b = laplacian.dot(source_flat)*0.75 + mat_b[mask_flat == 0] = target_flat[mask_flat == 0] + + x = spsolve(mat_A, mat_b).reshape((h, w)) + result.append(x) + + return np.clip(np.dstack(result), 0, 1) + + +def reinhard_color_transfer(target, source, clip=False, preserve_paper=False, source_mask=None, target_mask=None): + """ + Transfers the color distribution from the source to the target + image using the mean and standard deviations of the L*a*b* + color space. 
+ + This implementation is (loosely) based on to the "Color Transfer + between Images" paper by Reinhard et al., 2001. + + Parameters: + ------- + source: NumPy array + OpenCV image in BGR color space (the source image) + target: NumPy array + OpenCV image in BGR color space (the target image) + clip: Should components of L*a*b* image be scaled by np.clip before + converting back to BGR color space? + If False then components will be min-max scaled appropriately. + Clipping will keep target image brightness truer to the input. + Scaling will adjust image brightness to avoid washed out portions + in the resulting color transfer that can be caused by clipping. + preserve_paper: Should color transfer strictly follow methodology + layed out in original paper? The method does not always produce + aesthetically pleasing results. + If False then L*a*b* components will scaled using the reciprocal of + the scaling factor proposed in the paper. This method seems to produce + more consistently aesthetically pleasing results + + Returns: + ------- + transfer: NumPy array + OpenCV image (w, h, 3) NumPy array (uint8) + """ + + # convert the images from the RGB to L*ab* color space, being + # sure to utilizing the floating point data type (note: OpenCV + # expects floats to be 32-bit, so use that instead of 64-bit) + source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype(np.float32) + target = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype(np.float32) + + # compute color statistics for the source and target images + src_input = source if source_mask is None else source*source_mask + tgt_input = target if target_mask is None else target*target_mask + (lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, + bMeanSrc, bStdSrc) = lab_image_stats(src_input) + (lMeanTar, lStdTar, aMeanTar, aStdTar, + bMeanTar, bStdTar) = lab_image_stats(tgt_input) + + # subtract the means from the target image + (l, a, b) = cv2.split(target) + l -= lMeanTar + a -= aMeanTar + b -= bMeanTar + + if preserve_paper: + # scale by the standard deviations using paper proposed factor + l = (lStdTar / lStdSrc) * l + a = (aStdTar / aStdSrc) * a + b = (bStdTar / bStdSrc) * b + else: + # scale by the standard deviations using reciprocal of paper proposed factor + l = (lStdSrc / lStdTar) * l + a = (aStdSrc / aStdTar) * a + b = (bStdSrc / bStdTar) * b + + # add in the source mean + l += lMeanSrc + a += aMeanSrc + b += bMeanSrc + + # clip/scale the pixel intensities to [0, 255] if they fall + # outside this range + l = _scale_array(l, clip=clip) + a = _scale_array(a, clip=clip) + b = _scale_array(b, clip=clip) + + # merge the channels together and convert back to the RGB color + # space, being sure to utilize the 8-bit unsigned integer data + # type + transfer = cv2.merge([l, a, b]) + transfer = cv2.cvtColor(transfer.astype(np.uint8), cv2.COLOR_LAB2BGR) + + # return the color transferred image + return transfer + + +def linear_color_transfer(target_img, source_img, mode='pca', eps=1e-5): + ''' + Matches the colour distribution of the target image to that of the source image + using a linear transform. + Images are expected to be of form (w,h,c) and float in [0,1]. + Modes are chol, pca or sym for different choices of basis. 
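+
+    Illustrative call (assumes both arrays are float32 RGB images already in [0, 1]):
+        matched = linear_color_transfer(target_img, source_img, mode='pca')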
+ ''' + mu_t = target_img.mean(0).mean(0) + t = target_img - mu_t + t = t.transpose(2, 0, 1).reshape(t.shape[-1], -1) + Ct = t.dot(t.T) / t.shape[1] + eps * np.eye(t.shape[0]) + mu_s = source_img.mean(0).mean(0) + s = source_img - mu_s + s = s.transpose(2, 0, 1).reshape(s.shape[-1], -1) + Cs = s.dot(s.T) / s.shape[1] + eps * np.eye(s.shape[0]) + if mode == 'chol': + chol_t = np.linalg.cholesky(Ct) + chol_s = np.linalg.cholesky(Cs) + ts = chol_s.dot(np.linalg.inv(chol_t)).dot(t) + if mode == 'pca': + eva_t, eve_t = np.linalg.eigh(Ct) + Qt = eve_t.dot(np.sqrt(np.diag(eva_t))).dot(eve_t.T) + eva_s, eve_s = np.linalg.eigh(Cs) + Qs = eve_s.dot(np.sqrt(np.diag(eva_s))).dot(eve_s.T) + ts = Qs.dot(np.linalg.inv(Qt)).dot(t) + if mode == 'sym': + eva_t, eve_t = np.linalg.eigh(Ct) + Qt = eve_t.dot(np.sqrt(np.diag(eva_t))).dot(eve_t.T) + Qt_Cs_Qt = Qt.dot(Cs).dot(Qt) + eva_QtCsQt, eve_QtCsQt = np.linalg.eigh(Qt_Cs_Qt) + QtCsQt = eve_QtCsQt.dot(np.sqrt(np.diag(eva_QtCsQt))).dot(eve_QtCsQt.T) + ts = np.linalg.inv(Qt).dot(QtCsQt).dot(np.linalg.inv(Qt)).dot(t) + matched_img = ts.reshape( + *target_img.transpose(2, 0, 1).shape).transpose(1, 2, 0) + matched_img += mu_s + matched_img[matched_img > 1] = 1 + matched_img[matched_img < 0] = 0 + return np.clip(matched_img.astype(source_img.dtype), 0, 1) + + +def lab_image_stats(image): + # compute the mean and standard deviation of each channel + (l, a, b) = cv2.split(image) + (lMean, lStd) = (l.mean(), l.std()) + (aMean, aStd) = (a.mean(), a.std()) + (bMean, bStd) = (b.mean(), b.std()) + + # return the color statistics + return (lMean, lStd, aMean, aStd, bMean, bStd) + + +def _scale_array(arr, clip=True): + if clip: + return np.clip(arr, 0, 255) + + mn = arr.min() + mx = arr.max() + scale_range = (max([mn, 0]), min([mx, 255])) + + if mn < scale_range[0] or mx > scale_range[1]: + return (scale_range[1] - scale_range[0]) * (arr - mn) / (mx - mn) + scale_range[0] + + return arr + + +def channel_hist_match(source, template, hist_match_threshold=255, mask=None): + # Code borrowed from: + # https://stackoverflow.com/questions/32655686/histogram-matching-of-two-images-in-python-2-x + masked_source = source + masked_template = template + + if mask is not None: + masked_source = source * mask + masked_template = template * mask + + oldshape = source.shape + source = source.ravel() + template = template.ravel() + masked_source = masked_source.ravel() + masked_template = masked_template.ravel() + s_values, bin_idx, s_counts = np.unique(source, return_inverse=True, + return_counts=True) + t_values, t_counts = np.unique(template, return_counts=True) + + s_quantiles = np.cumsum(s_counts).astype(np.float64) + s_quantiles = hist_match_threshold * s_quantiles / s_quantiles[-1] + t_quantiles = np.cumsum(t_counts).astype(np.float64) + t_quantiles = 255 * t_quantiles / t_quantiles[-1] + interp_t_values = np.interp(s_quantiles, t_quantiles, t_values) + + return interp_t_values[bin_idx].reshape(oldshape) + + +def color_hist_match(src_im, tar_im, hist_match_threshold=255, mask=None): + h, w, c = src_im.shape + matched_R = channel_hist_match( + src_im[:, :, 0], tar_im[:, :, 0], hist_match_threshold, mask) + matched_G = channel_hist_match( + src_im[:, :, 1], tar_im[:, :, 1], hist_match_threshold, mask) + matched_B = channel_hist_match( + src_im[:, :, 2], tar_im[:, :, 2], hist_match_threshold, mask) + + to_stack = (matched_R, matched_G, matched_B) + for i in range(3, c): + to_stack += (src_im[:, :, i],) + + matched = np.stack(to_stack, axis=-1).astype(src_im.dtype) + return matched + + 
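+
+# Illustrative sketch (added for clarity; not called anywhere in this module). It
+# shows how the per-channel histogram matching above can be exercised on synthetic
+# float images in [0, 1], mirroring the 'seamless-hist-match' branch of the
+# color_transfer() dispatcher defined further down. The _demo name and the random
+# inputs are assumptions, not part of the original API.
+def _demo_color_hist_match():
+    rng = np.random.default_rng(0)
+    src = rng.random((64, 64, 3)).astype(np.float32)  # source image, float in [0, 1]
+    tar = rng.random((64, 64, 3)).astype(np.float32)  # colour reference image
+    matched = color_hist_match(src, tar)              # match src's per-channel histograms to tar's
+    return matched.shape                              # -> (64, 64, 3)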
+def color_transfer_mix(img_src, img_trg): + img_src = np.clip(img_src*255.0, 0, 255).astype(np.uint8) + img_trg = np.clip(img_trg*255.0, 0, 255).astype(np.uint8) + + img_src_lab = cv2.cvtColor(img_src, cv2.COLOR_BGR2LAB) + img_trg_lab = cv2.cvtColor(img_trg, cv2.COLOR_BGR2LAB) + + rct_light = np.clip(linear_color_transfer(img_src_lab[..., 0:1].astype(np.float32)/255.0, + img_trg_lab[..., 0:1].astype(np.float32)/255.0)[..., 0]*255.0, + 0, 255).astype(np.uint8) + + img_src_lab[..., 0] = (np.ones_like(rct_light)*100).astype(np.uint8) + img_src_lab = cv2.cvtColor(img_src_lab, cv2.COLOR_LAB2BGR) + + img_trg_lab[..., 0] = (np.ones_like(rct_light)*100).astype(np.uint8) + img_trg_lab = cv2.cvtColor(img_trg_lab, cv2.COLOR_LAB2BGR) + + img_rct = color_transfer_sot(img_src_lab.astype( + np.float32), img_trg_lab.astype(np.float32)) + img_rct = np.clip(img_rct, 0, 255).astype(np.uint8) + + img_rct = cv2.cvtColor(img_rct, cv2.COLOR_BGR2LAB) + img_rct[..., 0] = rct_light + img_rct = cv2.cvtColor(img_rct, cv2.COLOR_LAB2BGR) + + return (img_rct / 255.0).astype(np.float32) + + +def colorTransfer_fs(src_, dst_, mask): + src = dst_ + dst = src_ + transferredDst = np.copy(dst) + # indeksy nie czarnych pikseli maski + maskIndices = np.where(mask != 0) + # src[maskIndices[0], maskIndices[1]] zwraca piksele w nie czarnym obszarze maski + + maskedSrc = src[maskIndices[0], maskIndices[1]].astype(np.int32) + maskedDst = dst[maskIndices[0], maskIndices[1]].astype(np.int32) + + meanSrc = np.mean(maskedSrc, axis=0) + meanDst = np.mean(maskedDst, axis=0) + + maskedDst = maskedDst - meanDst + maskedDst = maskedDst + meanSrc + maskedDst = np.clip(maskedDst, 0, 255) + + transferredDst[maskIndices[0], maskIndices[1]] = maskedDst + return transferredDst + +def colorTransfer_avg(img_src, img_tgt, mask=None): + img_new = img_src.copy() + img_old = img_tgt.copy() + # print(mask) + if mask is not None: + img_new = (img_new*mask)#.astype(np.uint8) + img_old = (img_old*mask)#.astype(np.uint8) + # cv2.imshow('tgt', img_old) + w,h,c = img_new.shape + for i in range(img_new.shape[2]): + old_avg = img_old[:, :, i].mean() + new_avg = img_new[:, :, i].mean() + diff_int = old_avg - new_avg + # print(diff_int) + for m in range(img_new.shape[0]): + for n in range(img_new.shape[1]): + temp = img_new[m,n,i] + diff_int + temp = max(0., temp) + temp = min(1., temp) + # print(img_new[m,n,i], temp) + img_new[m,n,i] = temp + + return img_new + + + +def color_transfer(ct_mode, img_src, img_trg, mask): + """ + color transfer for [0,1] float32 inputs + """ + img_src = img_src.astype(dtype=np.float32) / 255.0 + img_trg = img_trg.astype(dtype=np.float32) / 255.0 + + if ct_mode == 'lct': + out = linear_color_transfer(img_src, img_trg) + elif ct_mode == 'rct': + out = reinhard_color_transfer(np.clip(img_src*255, 0, 255).astype(np.uint8), + np.clip(img_trg*255, 0, + 255).astype(np.uint8), + preserve_paper=np.random.rand() < 0.5, + clip=np.random.rand() < 0.5) + out = np.clip(out.astype(np.float32) / 255.0, 0.0, 1.0) + elif ct_mode == 'rct-m': + out = reinhard_color_transfer(np.clip(img_src*255, 0, 255).astype(np.uint8), + np.clip(img_trg*255, 0, + 255).astype(np.uint8), + source_mask=mask, target_mask=mask) + #preserve_paper=np.random.rand() < 0.5, + #clip=np.random.rand() < 0.5) + out = np.clip(out.astype(np.float32) / 255.0, 0.0, 1.0) + elif ct_mode == 'rct-fs': + out = colorTransfer_fs(np.clip(img_src*255, 0, 255).astype(np.uint8), + np.clip(img_trg*255, 0, 255).astype(np.uint8), mask) + out = np.clip(out.astype(np.float32) / 255.0, 0.0, 1.0) + elif 
ct_mode == 'mkl': + out = color_transfer_mkl(img_src, img_trg) + elif ct_mode == 'mkl-m': + out = color_transfer_mkl(img_src*mask, img_trg*mask) + elif ct_mode == 'idt': + out = color_transfer_idt(img_src, img_trg) + elif ct_mode == 'idt-m': + out = color_transfer_idt(img_src*mask, img_trg*mask) + elif ct_mode == 'sot': + out = color_transfer_sot(img_src, img_trg) + out = np.clip(out, 0.0, 1.0) + elif ct_mode == 'sot-m': + out = color_transfer_sot( + (img_src*mask).astype(np.float32), (img_trg*mask).astype(np.float32)) + out = np.clip(out, 0.0, 1.0) + elif ct_mode == 'mix-m': + out = color_transfer_mix(img_src*mask, img_trg*mask) + elif ct_mode == 'seamless-hist-match': + out = color_hist_match(img_src, img_trg) + elif ct_mode == 'seamless-hist-match-m': + out = color_hist_match(img_src, img_trg, mask=mask) + elif ct_mode == 'avg-align': + out = colorTransfer_avg(img_src, img_trg, mask=mask) + out = np.clip(out, 0.0, 1.0) + else: + raise ValueError(f"unknown ct_mode {ct_mode}") + + out = np.clip(out*255, 0, 255).astype(np.uint8) + return out \ No newline at end of file diff --git a/training/dataset/utils/face_align.py b/training/dataset/utils/face_align.py new file mode 100644 index 0000000000000000000000000000000000000000..062eee5a6f13d8ae915055923375a476af9afc2c --- /dev/null +++ b/training/dataset/utils/face_align.py @@ -0,0 +1,173 @@ +import numpy + +from .umeyama import umeyama +from numpy.linalg import inv +import cv2 + +mean_face_x = numpy.array([ +0.000213256, 0.0752622, 0.18113, 0.29077, 0.393397, 0.586856, 0.689483, 0.799124, +0.904991, 0.98004, 0.490127, 0.490127, 0.490127, 0.490127, 0.36688, 0.426036, +0.490127, 0.554217, 0.613373, 0.121737, 0.187122, 0.265825, 0.334606, 0.260918, +0.182743, 0.645647, 0.714428, 0.793132, 0.858516, 0.79751, 0.719335, 0.254149, +0.340985, 0.428858, 0.490127, 0.551395, 0.639268, 0.726104, 0.642159, 0.556721, +0.490127, 0.423532, 0.338094, 0.290379, 0.428096, 0.490127, 0.552157, 0.689874, +0.553364, 0.490127, 0.42689 ]) + +mean_face_y = numpy.array([ +0.106454, 0.038915, 0.0187482, 0.0344891, 0.0773906, 0.0773906, 0.0344891, +0.0187482, 0.038915, 0.106454, 0.203352, 0.307009, 0.409805, 0.515625, 0.587326, +0.609345, 0.628106, 0.609345, 0.587326, 0.216423, 0.178758, 0.179852, 0.231733, +0.245099, 0.244077, 0.231733, 0.179852, 0.178758, 0.216423, 0.244077, 0.245099, +0.780233, 0.745405, 0.727388, 0.742578, 0.727388, 0.745405, 0.780233, 0.864805, +0.902192, 0.909281, 0.902192, 0.864805, 0.784792, 0.778746, 0.785343, 0.778746, +0.784792, 0.824182, 0.831803, 0.824182 ]) + +landmarks_2D = numpy.stack( [ mean_face_x, mean_face_y ], axis=1 ) + +def get_align_mat(face, size, should_align_eyes): + mat_umeyama = umeyama(numpy.array(face.landmarks_as_xy()[17:]), landmarks_2D, True)[0:2] + + if should_align_eyes is False: + return mat_umeyama + + mat_umeyama = mat_umeyama * size + + # Convert to matrix + landmarks = numpy.matrix(face.landmarks_as_xy()) + + # cv2 expects points to be in the form np.array([ [[x1, y1]], [[x2, y2]], ... 
]), we'll expand the dim + landmarks = numpy.expand_dims(landmarks, axis=1) + + # Align the landmarks using umeyama + umeyama_landmarks = cv2.transform(landmarks, mat_umeyama, landmarks.shape) + + # Determine a rotation matrix to align eyes horizontally + mat_align_eyes = align_eyes(umeyama_landmarks, size) + + # Extend the 2x3 transform matrices to 3x3 so we can multiply them + # and combine them as one + mat_umeyama = numpy.matrix(mat_umeyama) + mat_umeyama.resize((3, 3)) + mat_align_eyes = numpy.matrix(mat_align_eyes) + mat_align_eyes.resize((3, 3)) + mat_umeyama[2] = mat_align_eyes[2] = [0, 0, 1] + + # Combine the umeyama transform with the extra rotation matrix + transform_mat = mat_align_eyes * mat_umeyama + + # Remove the extra row added, shape needs to be 2x3 + transform_mat = numpy.delete(transform_mat, 2, 0) + transform_mat = transform_mat / size + return transform_mat + + +from .face_blend import get_5_keypoint + +def get_align_mat_new(src_lmk, tgt_lmk, size=256, should_align_eyes=False): + mat_umeyama = umeyama(get_5_keypoint(src_lmk), get_5_keypoint(tgt_lmk), True)[0:2] + # mat_umeyama = umeyama(numpy.array(src_lmk[17:]), numpy.array(tgt_lmk[17:]), True)[0:2] + + if should_align_eyes is False: + return mat_umeyama + + mat_umeyama = mat_umeyama * size + + # Convert to matrix + landmarks = numpy.matrix(face.landmarks_as_xy()) + + # cv2 expects points to be in the form np.array([ [[x1, y1]], [[x2, y2]], ... ]), we'll expand the dim + landmarks = numpy.expand_dims(landmarks, axis=1) + + # Align the landmarks using umeyama + umeyama_landmarks = cv2.transform(landmarks, mat_umeyama, landmarks.shape) + + # Determine a rotation matrix to align eyes horizontally + mat_align_eyes = align_eyes(umeyama_landmarks, size) + + # Extend the 2x3 transform matrices to 3x3 so we can multiply them + # and combine them as one + mat_umeyama = numpy.matrix(mat_umeyama) + mat_umeyama.resize((3, 3)) + mat_align_eyes = numpy.matrix(mat_align_eyes) + mat_align_eyes.resize((3, 3)) + mat_umeyama[2] = mat_align_eyes[2] = [0, 0, 1] + + # Combine the umeyama transform with the extra rotation matrix + transform_mat = mat_align_eyes * mat_umeyama + + # Remove the extra row added, shape needs to be 2x3 + transform_mat = numpy.delete(transform_mat, 2, 0) + transform_mat = transform_mat / size + return transform_mat + +# Code borrowed from https://github.com/jrosebr1/imutils/blob/d5cb29d02cf178c399210d5a139a821dfb0ae136/imutils/face_utils/helpers.py +""" +The MIT License (MIT) + +Copyright (c) 2015-2016 Adrian Rosebrock, http://www.pyimagesearch.com + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +from collections import OrderedDict +import numpy as np +import cv2 + +# define a dictionary that maps the indexes of the facial +# landmarks to specific face regions +FACIAL_LANDMARKS_IDXS = OrderedDict([ + ("mouth", (48, 68)), + ("right_eyebrow", (17, 22)), + ("left_eyebrow", (22, 27)), + ("right_eye", (36, 42)), + ("left_eye", (42, 48)), + ("nose", (27, 36)), + ("jaw", (0, 17)), + ("chin", (8, 11)) +]) + +# Returns a rotation matrix that when applied to the 68 input facial landmarks +# results in landmarks with eyes aligned horizontally +def align_eyes(landmarks, size): + desiredLeftEye = (0.35, 0.35) # (y, x) value + desiredFaceWidth = desiredFaceHeight = size + + # extract the left and right eye (x, y)-coordinates + (lStart, lEnd) = FACIAL_LANDMARKS_IDXS["left_eye"] + (rStart, rEnd) = FACIAL_LANDMARKS_IDXS["right_eye"] + leftEyePts = landmarks[lStart:lEnd] + rightEyePts = landmarks[rStart:rEnd] + + # compute the center of mass for each eye + leftEyeCenter = leftEyePts.mean(axis=0).astype("int") + rightEyeCenter = rightEyePts.mean(axis=0).astype("int") + + # compute the angle between the eye centroids + dY = rightEyeCenter[0,1] - leftEyeCenter[0,1] + dX = rightEyeCenter[0,0] - leftEyeCenter[0,0] + angle = np.degrees(np.arctan2(dY, dX)) - 180 + + # compute center (x, y)-coordinates (i.e., the median point) + # between the two eyes in the input image + eyesCenter = ((leftEyeCenter[0,0] + rightEyeCenter[0,0]) // 2, (leftEyeCenter[0,1] + rightEyeCenter[0,1]) // 2) + + # grab the rotation matrix for rotating and scaling the face + M = cv2.getRotationMatrix2D(eyesCenter, angle, 1.0) + + return M \ No newline at end of file diff --git a/training/dataset/utils/face_aug.py b/training/dataset/utils/face_aug.py new file mode 100644 index 0000000000000000000000000000000000000000..3e116af4bcd3dc9fb1821652ae8d4afe5ce9e6de --- /dev/null +++ b/training/dataset/utils/face_aug.py @@ -0,0 +1,125 @@ +""" +Exposing DeepFake Videos By Detecting Face Warping Artifacts +Yuezun Li, Siwei Lyu +https://arxiv.org/abs/1811.00656 +""" +import cv2 +import numpy as np +from PIL import Image, ImageEnhance +# We only use opencv3 +# if not (cv2.__version__).startswith('3.'): +# raise ValueError('Only opencv 3. 
is supported!') + +''' +these two function is implemented by myself, may have some errors QAQ +''' + + +def change_res(img): + init_res = img.shape[0] + fake_res = np.random.randint(init_res//4, init_res*2) + img = cv2.resize(img, (fake_res, fake_res)) + img = cv2.resize(img, (init_res, init_res)) + return img, fake_res + + +def aug_one_im(img, + random_transform_args=None, + color_rng=[0.9, 1.1]): + """ + Augment operation for image list + :param images: image list + :param random_transform_args: shape transform arguments + :param color_rng: color transform arguments + :return: + """ + images = [img] + images = aug(images, random_transform_args, color_rng) + + return images[0] + + +def aug(images, + random_transform_args={ + 'rotation_range': 10, + 'zoom_range': 0.05, + 'shift_range': 0.05, + 'random_flip': 0.5, + }, + color_rng=[0.9, 1.1]): + """ + Augment operation for image list + :param images: image list + :param random_transform_args: shape transform arguments + :param color_rng: color transform arguments + :return: + """ + if random_transform_args is not None: # do aug + # Transform + images = random_transform(images, **random_transform_args) + # Color + if color_rng is not None: + for i, im in enumerate(images): + # im = im[:, :, (2, 1, 0)] + im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) + im = Image.fromarray(np.uint8(im)) + + # Brightness + factor = np.random.uniform(color_rng[0], color_rng[1]) + enhancer = ImageEnhance.Brightness(im) + im = enhancer.enhance(factor) + # Contrast + factor = np.random.uniform(color_rng[0], color_rng[1]) + enhancer = ImageEnhance.Contrast(im) + im = enhancer.enhance(factor) + # Color distort + factor = np.random.uniform(color_rng[0], color_rng[1]) + enhancer = ImageEnhance.Color(im) + im = enhancer.enhance(factor) + + # Sharpe + factor = np.random.uniform(color_rng[0], color_rng[1]) + enhancer = ImageEnhance.Sharpness(im) + im = enhancer.enhance(factor) + im = np.array(im).astype(np.uint8) + # im = im[:, :, (2, 1, 0)] + im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR) + images[i] = im.copy() + + return images + + +def random_transform(images, rotation_range, zoom_range, shift_range, random_flip): + """ + Random transform images in a list + :param images: + :param rotation_range: + :param zoom_range: + :param shift_range: + :param random_flip: + :return: + """ + h, w = images[0].shape[:2] + rotation = np.random.uniform(-rotation_range, rotation_range) + scale = np.random.uniform(1 - zoom_range, 1 + zoom_range) + tx = np.random.uniform(-shift_range, shift_range) * w + ty = np.random.uniform(-shift_range, shift_range) * h + flip_prob = np.random.random() + for i, image in enumerate(images): + mat = cv2.getRotationMatrix2D((w / 2, h / 2), rotation, scale) + mat[:, 2] += (tx, ty) + result = cv2.warpAffine( + image, mat, (w, h), borderMode=cv2.BORDER_REPLICATE) + if flip_prob < random_flip: + result = result[:, ::-1] + images[i] = result.copy() + return images + + +if __name__ == "__main__": + dirr = '/FaceXray/dataset/utils/' + test_im = cv2.imread('{}test.png'.format(dirr)) + resample_res, fake_res = change_res(test_im) + cv2.imwrite('{}res_{}.png'.format(dirr, fake_res), resample_res) + aug_im = aug_one_im(test_im) + cv2.imwrite('{}auged.png'.format(dirr), aug_im) diff --git a/training/dataset/utils/face_blend.py b/training/dataset/utils/face_blend.py new file mode 100644 index 0000000000000000000000000000000000000000..1b12657f80e8d2305df3e9f86cc402ee2469924e --- /dev/null +++ b/training/dataset/utils/face_blend.py @@ -0,0 +1,469 @@ +''' +Create face mask and face 
boundary mask according to face landmarks, +so as to supervize the activation of Conv layer. +''' + +import os +import numpy as np +import cv2 +import dlib +import random +import argparse +from tqdm import tqdm +import time +from skimage import transform as trans +# from color_transfer import color_transfer +from .warp import gen_warp_params, warp_by_params, warp_mask + + +def crop_img_bbox(img, bbox, res, scale=1.3): + x, y, w, h = bbox + left, right = x, x+w + top, bottom = y, y+h + + H, W, C = img.shape + cx, cy = (left+right)//2, (top+bottom)//2 + w, h = (right-left)//2, (bottom-top)//2 + + x1 = max(0, int(cx-w*scale)) + x2 = min(W, int(cx+w*scale)) + y1 = max(0, int(cy-h*scale)) + y2 = min(H, int(cy+h*scale)) + + roi = img[y1:y2, x1:x2] + roi = cv2.resize(roi, (res, res)) + + return roi + + +def get_mask_center(mask): + l, t, w, h = cv2.boundingRect(mask[:, :, 0:1].astype(np.uint8)) + center = int(l+w/2), int(t+h/2) + return center + + +def get_5_keypoint(shape): + def get_point(idx): + # return [shape.part(idx).x, shape.part(idx).y] + return shape[idx] + + def center(pt1, pt2): + return [(pt1[0]+pt2[0])//2, (pt1[1]+pt2[1])//2] + + leye = np.array(center(get_point(36), get_point(39)), + dtype=int).reshape(-1, 2) + reye = np.array(center(get_point(45), get_point(42)), + dtype=int).reshape(-1, 2) + nose = np.array(get_point(30), dtype=int).reshape(-1, 2) + lmouth = np.array(get_point(48), + dtype=int).reshape(-1, 2) + rmouth = np.array(get_point(54), + dtype=int).reshape(-1, 2) + + pts = np.concatenate([leye, reye, nose, lmouth, rmouth], axis=0) + + return pts + + +def get_boundary(mask): + if len(mask.shape) == 3: + mask = mask[:, :, 0] + mask = cv2.GaussianBlur(mask, (3, 3), 0) + boundary = mask / 255. + boundary = 4*boundary*(1.-boundary) + return boundary + + +# def get_boundary(mask): +# if len(mask.shape) == 3: +# mask = mask[:, :, 0] +# mask = cv2.GaussianBlur(mask, (3, 3), 0) +# mask = mask.astype(np.uint8) + +# # Dilation and Erosion to find the boundary +# dilated = cv2.dilate(mask, None, iterations=1) +# boundary = cv2.subtract(dilated, mask) + +# # normalize the boundary to have values between 0 and 1 +# boundary = boundary / 255. + +# return boundary + + + +def blur_mask(mask): + blur_k = 2*np.random.randint(1, 10)-1 + + #kernel = np.ones((blur_k+1, blur_k+1), np.uint8) + #mask = cv2.erode(mask, kernel) + + mask = cv2.GaussianBlur(mask, (blur_k, blur_k), 0) + + + return mask + + +def random_deform(pt, tgt, scale=0.3): + x1, y1 = pt + x2, y2 = tgt + + x = x1+(x2-x1)*np.random.rand()*scale + y = y1+(y2-y1)*np.random.rand()*scale + #print('before:', pt, ' after:', [int(x), int(y)]) + return [int(x), int(y)] + + +def get_specific_mask(img, shape, mtype='mouth', random_side=False): + if mtype == 'eyes': + landmarks = shape[42:45] if random.choice([True, False]) else shape[36:39] + + elif mtype == 'nose': + landmarks = shape[27:35] + + elif mtype == 'mouth': + landmarks = shape[48:60] + + elif mtype == 'eyebrows': + landmarks = shape[22:26] if random.choice([True, False]) else shape[17:21] + + else: + raise ValueError(f"Invalid mtype. 
Choose from 'eyes', 'nose', 'mouth', or 'eyebrows', but got {mtype}") + + # find convex hull + hull = cv2.convexHull(landmarks) + hull = hull.astype(int) + + # mask + hull_mask = np.zeros_like(img) + cv2.fillPoly(hull_mask, [hull], (255, 255, 255)) + mask = hull_mask + return mask + + +def get_hull_mask(img, shape, mtype='hull'): + if mtype == 'normal-hull': + landmarks = np.array(shape) + + # find convex hull + hull = cv2.convexHull(landmarks) + hull = hull.astype(int) + + # full face mask + hull_mask = np.zeros_like(img) + cv2.fillPoly(hull_mask, [hull], (255, 255, 255)) + mask = hull_mask + + elif mtype == 'inner-hull': + landmarks = shape[17:] + landmarks = np.array(landmarks) + + # find convex hull + hull = cv2.convexHull(landmarks) + hull = hull.astype(int) + + # full face mask + hull_mask = np.zeros_like(img) + cv2.fillPoly(hull_mask, [hull], (255, 255, 255)) + + mask = hull_mask + + elif mtype == 'inner-hull-no-eyebrow': + landmarks = shape[27:] + landmarks = np.array(landmarks) + # find convex hull + hull = cv2.convexHull(landmarks) + hull = hull.astype(int) + + # full face mask + hull_mask = np.zeros_like(img) + cv2.fillPoly(hull_mask, [hull], (255, 255, 255)) + + mask = hull_mask + + elif mtype == 'mouth-hull': + landmarks = shape[2:15] + #landmarks.append(shape[29]) + landmarks = np.concatenate([landmarks, shape[29].reshape(1, -1)], axis=0) + + # find convex hull + hull = cv2.convexHull(landmarks) + hull = hull.astype(int) + + # full face mask + hull_mask = np.zeros_like(img) + cv2.fillPoly(hull_mask, [hull], (255, 255, 255)) + + # kernel = np.ones((2, 2), np.uint8) + # c_mask = cv2.dilate(hull_mask, kernel, iterations=1) + mask = hull_mask + + elif mtype == 'whole-hull': + face_height = shape[9][1] - shape[22][1] + landmarks = [] + for i in range(27): + lmk = shape[i] + if i >= 5 and i <= 11: + x, y = lmk[0], lmk[1] + lmk = [x, max(0, y+15)] + # lift the eyebrows to get a larger landmark convex hull + if i >= 18 and i <= 27: + x, y = lmk[0], lmk[1] + lmk = [x, max(0, y-face_height//4)] + + landmarks.append(lmk) + + # find convex hull + landmarks = np.array(landmarks, dtype=np.int32) + hull = cv2.convexHull(landmarks) + hull = np.reshape(hull, (1, -1, 2)) + + # full face mask + hull_mask = np.zeros_like(img) + cv2.fillPoly(hull_mask, [hull], (255, 255, 255)) + + # kernel = np.ones((2, 2), np.uint8) + # c_mask = cv2.dilate(hull_mask, kernel, iterations=1) + mask = hull_mask + ''' + elif mtype == 'rect': + cnt = [] + for idx in [5, 11, 17, 26]: + cnt.append(shape[idx]) + x, y, w, h = cv2.boundingRect(np.array(cnt)) + rect_mask = np.zeros_like(img) + cv2.rectangle(rect_mask, (x, y), (x+w, y+h), + (255, 255, 255), cv2.FILLED) + mask = rect_mask + ''' + return mask + + +def get_mask(shape, img, std=20, deform=True, restrict_mask=None): + mask_type = [ + 'normal-hull', + 'inner-hull', + 'inner-hull-no-eyebrow', + 'mouth-hull', + 'whole-hull' + ] + max_mask = get_hull_mask(img, shape, 'whole-hull') + if deform: + mtype = mask_type[np.random.randint(len(mask_type))] + if mtype == 'rect': + mask = get_hull_mask(img, shape, 'inner-hull-no-eyebrow') + x, y, w, h = cv2.boundingRect(mask[:,:,0]) + for i in range(y, y+h): + for j in range(x, x+w): + for k in range(mask.shape[2]): + mask[i, j, k] = 255 + else: + mask = get_hull_mask(img, shape, mtype) + + # random deform + if np.random.rand() < 0.9: + mask = warp_mask(mask, std=std) + + # # random erode/dilate + # prob = np.random.rand() + # if prob < 0.3: + # erode_k = 2*np.random.randint(1, 10)+1 + # kernel = np.ones((erode_k, erode_k), 
np.uint8) + # mask = cv2.erode(mask, kernel) + # elif prob < 0.6: + # erode_k = 2*np.random.randint(1, 10)+1 + # kernel = np.ones((erode_k, erode_k), np.uint8) + # mask = cv2.dilate(mask, kernel) + else: + mask = max_mask.copy() + + if restrict_mask is not None: + mask = mask*(restrict_mask//255) + + # restrict mask range + mask = mask *(max_mask//255) + + # random blur + if deform and np.random.rand() < 0.9: + mask = blur_mask(mask) + + return mask[:,:,0] + +def mask_postprocess(mask): + # random erode/dilate + prob = np.random.rand() + if prob < 0.3: + erode_k = 2*np.random.randint(1, 10)+1 + kernel = np.ones((erode_k, erode_k), np.uint8) + mask = cv2.erode(mask, kernel) + elif prob < 0.6: + erode_k = 2*np.random.randint(1, 10)+1 + kernel = np.ones((erode_k, erode_k), np.uint8) + mask = cv2.dilate(mask, kernel) + + # random blur + if np.random.rand() < 0.9: + mask = blur_mask(mask) + + return mask + + +def get_affine_param(from_, to_): + # use skimage tranformation + tform = trans.SimilarityTransform() + tform.estimate(from_.astype(np.float32), to_.astype( + np.float32)) # tform.estimate(from_, to_) + M = tform.params[0:2, :] + + return M + + +def random_sharpen_img(img): + cand = ['bsharpen', 'gsharpen'] # , 'none'] + mode = cand[np.random.randint(len(cand))] + # print('sharpen mode:', mode) + if mode == "bsharpen": + # Sharpening using filter2D + kernel = np.ones((3, 3)) * (-1) + kernel[1, 1] = 9 + #kernel /= 9. + out = cv2.filter2D(img, -1, kernel) + elif mode == "gsharpen": + # Sharpening using Weighted Method + gaussain_blur = cv2.GaussianBlur(img, (0, 0), 3.0) + out = cv2.addWeighted( + img, 1.5, gaussain_blur, -0.5, 0, img) + else: + out = img + + return out + + +def random_blur_img(img): + cand = ['avg', 'gaussion', 'med'] # , 'none'] + mode = cand[np.random.randint(len(cand))] + # print('blur mode:', mode) + ksize = 2*np.random.randint(1, 5)+1 + + if mode == 'avg': + # Averaging + out = cv2.blur(img, (ksize, ksize)) + elif mode == 'gaussion': + # Gaussian Blurring + out = cv2.GaussianBlur(img, (ksize, ksize), 0) + elif mode == 'med': + # Median blurring + out = cv2.medianBlur(img, ksize) + else: + out = img + # elif mode == 'bilateral' + # # Bilateral Filtering + # out = cv2.bilateralFilter(img,9,75,75) + + return out + + +def random_warp_img(img, prob=0.5): + H, W, C = img.shape + param = gen_warp_params(W, flip=False) + choice = [True, False] + + out = warp_by_params(param, img, + can_flip=False, # choice[np.random.randint(2)], + can_transform=False, # choice[np.random.randint(2)], + can_warp=(np.random.randint(10) < int(prob*10)), + border_replicate=choice[np.random.randint(2)]) + return out + + +def main(args): + np.random.seed(int(time.time())) + detector = dlib.get_frontal_face_detector() + landmark_predictor = dlib.shape_predictor(args.model) + + src_im = cv2.imread(args.src) + tgt_im = cv2.imread(args.tgt) + + H, W, C = tgt_im.shape + src_im = cv2.resize(src_im, (W, H)) + + def get_shape(img): + img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + dets = detector(img, 1) + det = dets[0] + shape = landmark_predictor(img, det) + + return shape, det + + src_shape, src_det = get_shape(src_im) + src_5_pts = get_5_keypoint(src_shape) + src_mask = get_mask(src_shape, src_im, whole=True, deform=False) + + tgt_shape, tgt_det = get_shape(tgt_im) + tgt_5_pts = get_5_keypoint(tgt_shape) + tgt_mask = get_mask(tgt_shape, tgt_im, whole=False, deform=True) + + #aff_param = get_affine_param(src_5_pts, tgt_5_pts) + + # color transfer: + mask = src_mask[:, :, 0:1]/255. 
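+    # NOTE (descriptive comment): the `from color_transfer import color_transfer`
+    # import near the top of this file is commented out, so this demo assumes the
+    # color_transfer() dispatcher defined earlier in this diff -- which takes
+    # 0-255 BGR images plus a float mask in [0, 1] and returns a uint8 result --
+    # is otherwise in scope.
+    # The loop below runs every listed transfer mode once and writes
+    # '<mode>_colored.png'; since src_im is overwritten each iteration, the modes
+    # are applied cumulatively rather than each to the original source image.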
+ ct_modes = ['lct', 'rct', 'idt', 'idt-m', 'mkl', 'mkl-m', + 'sot', 'sot-m', 'mix-m'] # , 'seamless-hist-match'] + for mode in ct_modes: + colored_src = color_transfer(mode, src_im, tgt_im, mask) + cv2.imwrite('{}_colored.png'.format(mode), colored_src) + src_im = colored_src + + w1, h1 = src_det.right()-src_det.left(), src_det.bottom()-src_det.top() + w2, h2 = tgt_det.right()-tgt_det.left(), tgt_det.bottom()-tgt_det.top() + w_scale, h_scale = w2/w1, h2/h1 + + scaled_src = cv2.resize(src_im, (int(W*w_scale), int(H*h_scale))) + scaled_mask = cv2.resize(src_mask, (int(W*w_scale), int(H*h_scale))) + + src_5_pts[:, 0] = src_5_pts[:, 0]*w_scale + src_5_pts[:, 1] = src_5_pts[:, 1]*h_scale + aff_param = get_affine_param(src_5_pts, tgt_5_pts) + + aligned_src = cv2.warpAffine( + scaled_src, aff_param, (W, H), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REFLECT) + aligned_mask = cv2.warpAffine( + scaled_mask, aff_param, (W, H), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REFLECT) + + center = get_mask_center(aligned_mask) + print('mask center:', center) + # colored_src = transfer_color(aligned_src, tgt_im) + + init_blend = cv2.seamlessClone( + aligned_src, tgt_im, aligned_mask, center, cv2.NORMAL_CLONE) + cv2.imwrite('init_blended.png', init_blend) + + # aligned_blend = cv2.warpAffine( + # colored_blend, aff_param, (W, H), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REFLECT) + b_mask = tgt_mask[:, :, 0:1]/255. + out_blend = init_blend*b_mask + tgt_im*(1. - b_mask) + cv2.imwrite('out_blended.png', out_blend) + + res = 256 + blend_crop = crop_img_bbox(out_blend, tgt_det, res, scale=1.5) + mask_crop = crop_img_bbox(tgt_mask, tgt_det, res, scale=1.5) + boundary = get_boundary(mask_crop) + + cv2.imwrite('crop_blend.png', blend_crop) + cv2.imwrite('crop_mask.png', mask_crop) + cv2.imwrite('crop_bound.png', boundary*255) + + +if __name__ == "__main__": + p = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + p.add_argument('-s', '--src', type=str, + help='src image') + p.add_argument('-t', '--tgt', type=str, + help='tgt image') + p.add_argument('--model', type=str, default='/data1/yuchen/download/face_landmark/shape_predictor_68_face_landmarks.dat', + help="path to downloaded detector") + args = p.parse_args() + print(args) + + main(args) diff --git a/training/dataset/utils/faceswap.py b/training/dataset/utils/faceswap.py new file mode 100644 index 0000000000000000000000000000000000000000..0dbcb1db91e3d12ab7b0cd82ab0950ef05ad2c82 --- /dev/null +++ b/training/dataset/utils/faceswap.py @@ -0,0 +1,249 @@ +''' +code from https://github.com/wuhuikai/FaceSwap/blob/master/face_swap.py +''' + + + +#! /usr/bin/env python +import cv2 +import numpy as np +import scipy.spatial as spatial +import logging + + +## 3D Transform +def bilinear_interpolate(img, coords): + """ Interpolates over every image channel + http://en.wikipedia.org/wiki/Bilinear_interpolation + :param img: max 3 channel image + :param coords: 2 x _m_ array. 
1st row = xcoords, 2nd row = ycoords + :returns: array of interpolated pixels with same shape as coords + """ + int_coords = np.int32(coords) + x0, y0 = int_coords + x0[x0>254] = 254 + y0[y0>254] = 254 + dx, dy = coords - int_coords + + # 4 Neighour pixels + q11 = img[y0, x0] + q21 = img[y0, x0 + 1] + q12 = img[y0 + 1, x0] + q22 = img[y0 + 1, x0 + 1] + + btm = q21.T * dx + q11.T * (1 - dx) + top = q22.T * dx + q12.T * (1 - dx) + inter_pixel = top * dy + btm * (1 - dy) + + return inter_pixel.T + +def grid_coordinates(points): + """ x,y grid coordinates within the ROI of supplied points + :param points: points to generate grid coordinates + :returns: array of (x, y) coordinates + """ + xmin = np.min(points[:, 0]) + xmax = np.max(points[:, 0]) + 1 + ymin = np.min(points[:, 1]) + ymax = np.max(points[:, 1]) + 1 + + return np.asarray([(x, y) for y in range(ymin, ymax) + for x in range(xmin, xmax)], np.uint32) + + +def process_warp(src_img, result_img, tri_affines, dst_points, delaunay): + """ + Warp each triangle from the src_image only within the + ROI of the destination image (points in dst_points). + """ + roi_coords = grid_coordinates(dst_points) + # indices to vertices. -1 if pixel is not in any triangle + roi_tri_indices = delaunay.find_simplex(roi_coords) + + for simplex_index in range(len(delaunay.simplices)): + coords = roi_coords[roi_tri_indices == simplex_index] + num_coords = len(coords) + out_coords = np.dot(tri_affines[simplex_index], + np.vstack((coords.T, np.ones(num_coords)))) + x, y = coords.T + x[x>255] = 255 + y[y>255] = 255 + result_img[y, x] = bilinear_interpolate(src_img, out_coords) + + return None + + +def triangular_affine_matrices(vertices, src_points, dst_points): + """ + Calculate the affine transformation matrix for each + triangle (x,y) vertex from dst_points to src_points + :param vertices: array of triplet indices to corners of triangle + :param src_points: array of [x, y] points to landmarks for source image + :param dst_points: array of [x, y] points to landmarks for destination image + :returns: 2 x 3 affine matrix transformation for a triangle + """ + ones = [1, 1, 1] + for tri_indices in vertices: + #print(tri_indices) + src_tri = np.vstack((src_points[tri_indices, :].T, ones)) + dst_tri = np.vstack((dst_points[tri_indices, :].T, ones)) + mat = np.dot(src_tri, np.linalg.inv(dst_tri))[:2, :] + yield mat + + +def warp_image_3d(src_img, src_points, dst_points, dst_shape, dtype=np.uint8): + rows, cols = dst_shape[:2] + result_img = np.zeros((rows, cols, 3), dtype=dtype) + + delaunay = spatial.Delaunay(dst_points) + tri_affines = np.asarray(list(triangular_affine_matrices( + delaunay.simplices, src_points, dst_points))) + + process_warp(src_img, result_img, tri_affines, dst_points, delaunay) + + return result_img + + +## 2D Transform +def transformation_from_points(points1, points2): + points1 = points1.astype(np.float64) + points2 = points2.astype(np.float64) + + c1 = np.mean(points1, axis=0) + c2 = np.mean(points2, axis=0) + points1 -= c1 + points2 -= c2 + + s1 = np.std(points1) + s2 = np.std(points2) + points1 /= s1 + points2 /= s2 + + U, S, Vt = np.linalg.svd(np.dot(points1.T, points2)) + R = (np.dot(U, Vt)).T + + return np.vstack([np.hstack([s2 / s1 * R, + (c2.T - np.dot(s2 / s1 * R, c1.T))[:, np.newaxis]]), + np.array([[0., 0., 1.]])]) + + +def warp_image_2d(im, M, dshape): + output_im = np.zeros(dshape, dtype=im.dtype) + cv2.warpAffine(im, + M[:2], + (dshape[1], dshape[0]), + dst=output_im, + borderMode=cv2.BORDER_TRANSPARENT, + 
flags=cv2.WARP_INVERSE_MAP) + + return output_im + + +## Generate Mask +def mask_from_points(size, points,erode_flag=1): + radius = 10 # kernel size + kernel = np.ones((radius, radius), np.uint8) + + mask = np.zeros(size, np.uint8) + cv2.fillConvexPoly(mask, cv2.convexHull(points), 255) + if erode_flag: + mask = cv2.erode(mask, kernel,iterations=1) + + return mask + + +## Color Correction +def correct_colours(im1, im2, landmarks1): + COLOUR_CORRECT_BLUR_FRAC = 0.75 + LEFT_EYE_POINTS = list(range(42, 48)) + RIGHT_EYE_POINTS = list(range(36, 42)) + + blur_amount = COLOUR_CORRECT_BLUR_FRAC * np.linalg.norm( + np.mean(landmarks1[LEFT_EYE_POINTS], axis=0) - + np.mean(landmarks1[RIGHT_EYE_POINTS], axis=0)) + blur_amount = int(blur_amount) + if blur_amount % 2 == 0: + blur_amount += 1 + im1_blur = cv2.GaussianBlur(im1, (blur_amount, blur_amount), 0) + im2_blur = cv2.GaussianBlur(im2, (blur_amount, blur_amount), 0) + + # Avoid divide-by-zero errors. + im2_blur = im2_blur.astype(int) + im2_blur += 128*(im2_blur <= 1) + + result = im2.astype(np.float64) * im1_blur.astype(np.float64) / im2_blur.astype(np.float64) + result = np.clip(result, 0, 255).astype(np.uint8) + + return result + + +## Copy-and-paste +def apply_mask(img, mask): + """ Apply mask to supplied image + :param img: max 3 channel image + :param mask: [0-255] values in mask + :returns: new image with mask applied + """ + masked_img=cv2.bitwise_and(img,img,mask=mask) + + return masked_img + + +## Alpha blending +def alpha_feathering(src_img, dest_img, img_mask, blur_radius=15): + mask = cv2.blur(img_mask, (blur_radius, blur_radius)) + mask = mask / 255.0 + + result_img = np.empty(src_img.shape, np.uint8) + for i in range(3): + result_img[..., i] = src_img[..., i] * mask + dest_img[..., i] * (1-mask) + + return result_img + + +def check_points(img,points): + # Todo: I just consider one situation. 
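+    # The "one situation" above: only the chin landmark (index 8 in the 68-point
+    # layout) dropping below the bottom image edge is detected; landmarks leaving
+    # the frame on the left, right or top are not checked. For example, with a
+    # 256x256 image a chin point at y=300 makes the function log "Jaw part out of
+    # image" and return False, while an eye landmark at x=-5 still passes.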
+ if points[8,1]>img.shape[0]: + logging.error("Jaw part out of image") + else: + return True + return False + + +def face_swap(src_face, dst_face, src_points, dst_points, dst_shape, dst_img, args, end=48): + h, w = dst_face.shape[:2] + + ## 3d warp + warped_src_face = warp_image_3d(src_face, src_points[:end], dst_points[:end], (h, w)) + ## Mask for blending + mask = mask_from_points((h, w), dst_points) + mask_src = np.mean(warped_src_face, axis=2) > 0 + mask = np.asarray(mask * mask_src, dtype=np.uint8) + ## Correct color + if args.correct_color: + warped_src_face = apply_mask(warped_src_face, mask) + dst_face_masked = apply_mask(dst_face, mask) + warped_src_face = correct_colours(dst_face_masked, warped_src_face, dst_points) + ## 2d warp + if args.warp_2d: + unwarped_src_face = warp_image_3d(warped_src_face, dst_points[:end], src_points[:end], src_face.shape[:2]) + warped_src_face = warp_image_2d(unwarped_src_face, transformation_from_points(dst_points, src_points), + (h, w, 3)) + + mask = mask_from_points((h, w), dst_points) + mask_src = np.mean(warped_src_face, axis=2) > 0 + mask = np.asarray(mask * mask_src, dtype=np.uint8) + + ## Shrink the mask + kernel = np.ones((10, 10), np.uint8) + mask = cv2.erode(mask, kernel, iterations=1) + ##Poisson Blending + r = cv2.boundingRect(mask) + center = ((r[0] + int(r[2] / 2), r[1] + int(r[3] / 2))) + output = cv2.seamlessClone(warped_src_face, dst_face, mask, center, cv2.NORMAL_CLONE) + + x, y, w, h = dst_shape + dst_img_cp = dst_img.copy() + dst_img_cp[y:y + h, x:x + w] = output + + return dst_img_cp diff --git a/training/dataset/utils/faceswap_utils.py b/training/dataset/utils/faceswap_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..33b944a90b015a6f329d054285aa67e927b61b32 --- /dev/null +++ b/training/dataset/utils/faceswap_utils.py @@ -0,0 +1,63 @@ +import numpy as np +import cv2 + +def AlphaBlend(foreground, background, alpha): + # Convert uint8 to float + foreground = foreground.astype(float) + background = background.astype(float) + + # Normalize the alpha mask to keep intensity between 0 and 1 + alpha = alpha.astype(float)/255 + if len(alpha.shape) < 3: + alpha = np.expand_dims(alpha, 2) + outImage = alpha * foreground + (1.-alpha) * background + outImage = np.clip(outImage, 0, 255).astype(np.uint8) + + return outImage + +def blendImages(src, dst, mask, featherAmount=0.1): + maskIndices = np.where(mask != 0) + maskPts = np.hstack( + (maskIndices[1][:, np.newaxis], maskIndices[0][:, np.newaxis])) + faceSize = np.max(maskPts, axis=0) - np.min(maskPts, axis=0) + featherAmount = 0.2 + + hull = cv2.convexHull(maskPts) + #hull = hull.astype(np.uint64) + dists = np.zeros(maskPts.shape[0]) + for i in range(maskPts.shape[0]): + point = (maskPts[i, 0], maskPts[i, 1]) + """ + The third paprameter can be set as "True" for more visually diverse images. + We use "False" to add imperceptible image patterns to synthesize new images. 
+ """ + point_x, point_y = point + dists[i] = cv2.pointPolygonTest(hull, (int(point_x),int(point_y)), False) + + weights = np.clip(dists / featherAmount, 0, 1) + composedImg = np.copy(dst) + composedImg[maskIndices[0], maskIndices[1]] = weights[:, np.newaxis] * \ + src[maskIndices[0], maskIndices[1]] + \ + (1 - weights[:, np.newaxis]) * \ + dst[maskIndices[0], maskIndices[1]] + newMask = np.zeros_like(dst).astype(np.float32) + newMask[maskIndices[0], maskIndices[1]] = weights[:, np.newaxis] + + return composedImg, newMask + + +def colorTransfer(src_, dst_, mask): + src = dst_ + dst = src_ + transferredDst = np.copy(dst) + maskIndices = np.where(mask != 0) + maskedSrc = src[maskIndices[0], maskIndices[1]].astype(np.int32) + maskedDst = dst[maskIndices[0], maskIndices[1]].astype(np.int32) + meanSrc = np.mean(maskedSrc, axis=0) + meanDst = np.mean(maskedDst, axis=0) + maskedDst = maskedDst - meanDst + maskedDst = maskedDst + meanSrc + maskedDst = np.clip(maskedDst, 0, 255) + transferredDst[maskIndices[0], maskIndices[1]] = maskedDst + + return transferredDst diff --git a/training/dataset/utils/faceswap_utils_sladd.py b/training/dataset/utils/faceswap_utils_sladd.py new file mode 100644 index 0000000000000000000000000000000000000000..c277a8862ee0a304106626fecff2ba3aaae8c04e --- /dev/null +++ b/training/dataset/utils/faceswap_utils_sladd.py @@ -0,0 +1,62 @@ +import numpy as np +import cv2 + +def AlphaBlend(foreground, background, alpha): + # Convert uint8 to float + foreground = foreground.astype(float) + background = background.astype(float) + + # Normalize the alpha mask to keep intensity between 0 and 1 + alpha = alpha.astype(float)/255 + if len(alpha.shape) < 3: + alpha = np.expand_dims(alpha, 2) + outImage = alpha * foreground + (1.-alpha) * background + outImage = np.clip(outImage, 0, 255).astype(np.uint8) + + return outImage + +def blendImages(src, dst, mask, featherAmount=0.1): + maskIndices = np.where(mask != 0) + maskPts = np.hstack( + (maskIndices[1][:, np.newaxis], maskIndices[0][:, np.newaxis])) + faceSize = np.max(maskPts, axis=0) - np.min(maskPts, axis=0) + featherAmount = featherAmount * np.max(faceSize) + + hull = cv2.convexHull(maskPts) + #hull = hull.astype(np.uint64) + dists = np.zeros(maskPts.shape[0]) + for i in range(maskPts.shape[0]): + point = (int(maskPts[i, 0]), int(maskPts[i, 1])) + """ + The third paprameter can be set as "True" for more visually diverse images. + We use "False" to add imperceptible image patterns to synthesize new images. 
+ """ + dists[i] = cv2.pointPolygonTest(hull, point, False) + + weights = np.clip(dists / featherAmount, 0, 1) + composedImg = np.copy(dst) + composedImg[maskIndices[0], maskIndices[1]] = weights[:, np.newaxis] * \ + src[maskIndices[0], maskIndices[1]] + \ + (1 - weights[:, np.newaxis]) * \ + dst[maskIndices[0], maskIndices[1]] + newMask = np.zeros_like(dst).astype(np.float32) + newMask[maskIndices[0], maskIndices[1]] = weights[:, np.newaxis] + + return composedImg, newMask + + +def colorTransfer(src_, dst_, mask): + src = dst_ + dst = src_ + transferredDst = np.copy(dst) + maskIndices = np.where(mask != 0) + maskedSrc = src[maskIndices[0], maskIndices[1]].astype(np.int32) + maskedDst = dst[maskIndices[0], maskIndices[1]].astype(np.int32) + meanSrc = np.mean(maskedSrc, axis=0) + meanDst = np.mean(maskedDst, axis=0) + maskedDst = maskedDst - meanDst + maskedDst = maskedDst + meanSrc + maskedDst = np.clip(maskedDst, 0, 255) + transferredDst[maskIndices[0], maskIndices[1]] = maskedDst + + return transferredDst diff --git a/training/dataset/utils/image_ae.py b/training/dataset/utils/image_ae.py new file mode 100644 index 0000000000000000000000000000000000000000..ee0e80701e3b0897a07430191c8ca618f5657a67 --- /dev/null +++ b/training/dataset/utils/image_ae.py @@ -0,0 +1,135 @@ +from torch import nn +from torch.autograd import Variable +import torch +import torch.nn.functional as F + +import torchvision.models as models + +def add_gaussian_noise(ins, mean=0, stddev=0.1): + noise = ins.data.new(ins.size()).normal_(mean, stddev) + return ins + noise + +class FlattenLayer(nn.Module): + def __init__(self): + super(FlattenLayer, self).__init__() + + def forward(self, x): + return x.view(x.size(0), -1) + + +class UnflattenLayer(nn.Module): + def __init__(self, width): + super(UnflattenLayer, self).__init__() + self.width = width + + def forward(self, x): + return x.view(x.size(0), -1, self.width, self.width) + +class VAE_Encoder(nn.Module): + ''' + VAE_Encoder: Encode image into std and logvar + ''' + + def __init__(self, latent_dim=256): + super(VAE_Encoder, self).__init__() + self.resnet = models.resnet18(pretrained=True) + self.resnet.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.resnet = nn.Sequential( + *list(self.resnet.children())[:-1], + FlattenLayer() + ) + + self.l_mu = nn.Linear(512, latent_dim) + self.l_var = nn.Linear(512, latent_dim) + + def encode(self, x): + hidden = self.resnet(x) + mu = self.l_mu(hidden) + logvar = self.l_var(hidden) + return mu, logvar + + def reparameterize(self, mu, logvar): + if self.training: + std = torch.exp(0.5*logvar) + eps = torch.randn_like(std) + return mu + eps*std + + else: + return mu + + def forward(self, x): + mu, logvar = self.encode(x) + z = self.reparameterize(mu, logvar) + return z, mu, logvar + + +class VAE_Decoder(nn.Module): + ''' + VAE_Decoder: Decode noise to image + ''' + + def __init__(self, latent_dim, output_dim=3): + super(VAE_Decoder, self).__init__() + self.convs = nn.Sequential( + UnflattenLayer(width=1), + nn.ConvTranspose2d(latent_dim, 512, 4, 1, 0, bias=False), + nn.ReLU(inplace=True), + nn.ConvTranspose2d(512, 384, 4, 2, 1, bias=False), + nn.BatchNorm2d(384), + nn.ReLU(inplace=True), + nn.ConvTranspose2d(384, 192, 4, 2, 1, bias=False), + nn.BatchNorm2d(192), + nn.ReLU(inplace=True), + nn.ConvTranspose2d(192, 96, 4, 2, 1, bias=False), + nn.BatchNorm2d(96), + nn.ReLU(inplace=True), + nn.ConvTranspose2d(96, 64, 4, 2, 1, bias=False), + nn.BatchNorm2d(64), + nn.ReLU(inplace=True), + nn.ConvTranspose2d(64, 32, 4, 2, 1, bias=False), + 
nn.BatchNorm2d(32), + nn.LeakyReLU(inplace=True), + nn.ConvTranspose2d(32, 3, 4, 2, 1, bias=False), + nn.Tanh() + ) + + def forward(self, z): + return self.convs(z) + +class ImageAE(nn.Module): + # VAE architecture + def __init__(self): + super(ImageAE, self).__init__() + latent_dim = 512 + self.enc = VAE_Encoder(latent_dim) + self.dec = VAE_Decoder(latent_dim) + + def forward(self, x): + z, *_ = self.enc(x) + out = self.dec(z) + + return out + + def load_ckpt(self, enc_path, dec_path): + self.enc.load_state_dict(torch.load(enc_path, map_location='cpu')) + self.dec.load_state_dict(torch.load(dec_path, map_location='cpu')) + + +def get_pretraiend_ae(enc_path='pretrained/ae/vae/enc.pth', dec_path='pretrained/ae/vae/dec1.pth'): + ae = ImageAE() + ae.load_ckpt(enc_path, dec_path) + print('load image auto-encoder') + ae.eval() + return ae + +# from networks.pix2pix_network import UnetGenerator +def get_pretraiend_unet(path='pretrained/ae/unet/ckpt_srm.pth'): + unet = UnetGenerator(3, 3, 8) + unet.load_state_dict(torch.load(path, map_location='cpu')) + print('load Unet') + unet.eval() + return unet + +if __name__ == "__main__": + ae = get_pretraiend_ae() + print(ae) diff --git a/training/dataset/utils/umeyama.py b/training/dataset/utils/umeyama.py new file mode 100644 index 0000000000000000000000000000000000000000..a83548491f16e5e740c1144b9e181fe1587fb5bc --- /dev/null +++ b/training/dataset/utils/umeyama.py @@ -0,0 +1,84 @@ +## License (Modified BSD) +## Copyright (C) 2011, the scikit-image team All rights reserved. +## +## Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: +## +## Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +## Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +## Neither the name of skimage nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. +## THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# umeyama function from scikit-image/skimage/transform/_geometric.py + +import numpy as np + + +def umeyama(src, dst, estimate_scale): + """Estimate N-D similarity transformation with or without scaling. + Parameters + ---------- + src : (M, N) array + Source coordinates. + dst : (M, N) array + Destination coordinates. + estimate_scale : bool + Whether to estimate scaling factor. + Returns + ------- + T : (N + 1, N + 1) + The homogeneous similarity transformation matrix. The matrix contains + NaN values only if the problem is not well-conditioned. + References + ---------- + .. 
[1] "Least-squares estimation of transformation parameters between two + point patterns", Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573 + """ + + num = src.shape[0] + dim = src.shape[1] + + # Compute mean of src and dst. + src_mean = src.mean(axis=0) + dst_mean = dst.mean(axis=0) + + # Subtract mean from src and dst. + src_demean = src - src_mean + dst_demean = dst - dst_mean + + # Eq. (38). + A = np.dot(dst_demean.T, src_demean) / num + + # Eq. (39). + d = np.ones((dim,), dtype=np.double) + if np.linalg.det(A) < 0: + d[dim - 1] = -1 + + T = np.eye(dim + 1, dtype=np.double) + + U, S, V = np.linalg.svd(A) + + # Eq. (40) and (43). + rank = np.linalg.matrix_rank(A) + if rank == 0: + return np.nan * T + elif rank == dim - 1: + if np.linalg.det(U) * np.linalg.det(V) > 0: + T[:dim, :dim] = np.dot(U, V) + else: + s = d[dim - 1] + d[dim - 1] = -1 + T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V)) + d[dim - 1] = s + else: + T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V.T)) + + if estimate_scale: + # Eq. (41) and (42). + scale = 1.0 / src_demean.var(axis=0).sum() * np.dot(S, d) + else: + scale = 1.0 + + T[:dim, dim] = dst_mean - scale * np.dot(T[:dim, :dim], src_mean.T) + T[:dim, :dim] *= scale + + return T diff --git a/training/dataset/utils/warp.py b/training/dataset/utils/warp.py new file mode 100644 index 0000000000000000000000000000000000000000..437e58f43e9b15476eda06c7aedbec425e259aa0 --- /dev/null +++ b/training/dataset/utils/warp.py @@ -0,0 +1,111 @@ +import numpy as np +import cv2 +# from core import randomex + + +def random_normal(size=(1,), trunc_val=2.5): + len = np.array(size).prod() + result = np.empty((len,), dtype=np.float32) + + for i in range(len): + while True: + x = np.random.normal() + if x >= -trunc_val and x <= trunc_val: + break + result[i] = (x / trunc_val) + + return result.reshape(size) + + +def gen_warp_params(w, flip, rotation_range=[-10, 10], scale_range=[-0.5, 0.5], tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05], rnd_state=None): + if rnd_state is None: + rnd_state = np.random + + rotation = rnd_state.uniform(rotation_range[0], rotation_range[1]) + scale = rnd_state.uniform(1 + scale_range[0], 1 + scale_range[1]) + tx = rnd_state.uniform(tx_range[0], tx_range[1]) + ty = rnd_state.uniform(ty_range[0], ty_range[1]) + p_flip = flip and rnd_state.randint(10) < 4 + + # random warp by grid + cell_size = [w // (2**i) for i in range(1, 4)][rnd_state.randint(3)] + cell_count = w // cell_size + 1 + + grid_points = np.linspace(0, w, cell_count) + mapx = np.broadcast_to(grid_points, (cell_count, cell_count)).copy() + mapy = mapx.T + + mapx[1:-1, 1:-1] = mapx[1:-1, 1:-1] + \ + random_normal( + size=(cell_count-2, cell_count-2))*(cell_size*0.24) + mapy[1:-1, 1:-1] = mapy[1:-1, 1:-1] + \ + random_normal( + size=(cell_count-2, cell_count-2))*(cell_size*0.24) + + half_cell_size = cell_size // 2 + + mapx = cv2.resize(mapx, (w+cell_size,)*2)[ + half_cell_size:-half_cell_size-1, half_cell_size:-half_cell_size-1].astype(np.float32) + mapy = cv2.resize(mapy, (w+cell_size,)*2)[ + half_cell_size:-half_cell_size-1, half_cell_size:-half_cell_size-1].astype(np.float32) + + # random transform + random_transform_mat = cv2.getRotationMatrix2D( + (w // 2, w // 2), rotation, scale) + random_transform_mat[:, 2] += (tx*w, ty*w) + + params = dict() + params['mapx'] = mapx + params['mapy'] = mapy + params['rmat'] = random_transform_mat + params['w'] = w + params['flip'] = p_flip + + return params + + +def warp_by_params(params, img, can_warp, can_transform, can_flip, border_replicate, 
cv2_inter=cv2.INTER_CUBIC): + if can_warp: + img = cv2.remap(img, params['mapx'], params['mapy'], cv2_inter) + if can_transform: + img = cv2.warpAffine(img, params['rmat'], (params['w'], params['w']), borderMode=( + cv2.BORDER_REPLICATE if border_replicate else cv2.BORDER_CONSTANT), flags=cv2_inter) + if len(img.shape) == 2: + img = img[..., None] + if can_flip and params['flip']: + img = img[:, ::-1, ...] + return img + +from skimage.transform import PiecewiseAffineTransform, warp +def random_deform(imageSize, nrows, ncols, mean=0, std=5): + try: + h, w, c = imageSize + except: + h, w = imageSize + c = 1 + rows = np.linspace(0, h, nrows).astype(np.int32) + cols = np.linspace(0, w, ncols).astype(np.int32) + rows, cols = np.meshgrid(rows, cols) + anchors = np.vstack([rows.flat, cols.flat]).T + assert anchors.shape[1] == 2 and anchors.shape[0] == ncols * nrows + deformed = anchors + np.random.normal(mean, std, size=anchors.shape) + #print(anchors) + #print(deformed) + np.clip(deformed[:,0], 0, h-1, deformed[:,0]) + np.clip(deformed[:,1], 0, w-1, deformed[:,1]) + return anchors.astype(np.float32), deformed.astype(np.float32) + + +def piecewise_affine_transform(image, srcAnchor, tgtAnchor): + trans = PiecewiseAffineTransform() + trans.estimate(srcAnchor, tgtAnchor) + # tform.estimate(from_.astype(np.float32), to_.astype( + # np.float32)) # tform.estimate(from_, to_) + # M = tform.params[0:2, :] + warped = warp(image, trans) + return warped + +def warp_mask(mask, std): + ach, tgt_ach = random_deform(mask.shape, 4, 4, std=std) + warped_mask = piecewise_affine_transform(mask, ach, tgt_ach) + return (warped_mask*255).astype(np.uint8) diff --git a/training/detectors/__init__.py b/training/detectors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bc5db184a0049ddaf51510252deb0014a5af45e0 --- /dev/null +++ b/training/detectors/__init__.py @@ -0,0 +1,15 @@ +import os +import sys +current_file_path = os.path.abspath(__file__) +parent_dir = os.path.dirname(os.path.dirname(current_file_path)) +project_root_dir = os.path.dirname(parent_dir) +sys.path.append(parent_dir) +sys.path.append(project_root_dir) + +from metrics.registry import DETECTOR +from .utils import slowfast + +from .xception_detector import XceptionDetector +from .efficientnetb4_detector import EfficientDetector +from .resnet34_detector import ResnetDetector +from .ucf_detector import UCFDetector diff --git a/training/detectors/base_detector.py b/training/detectors/base_detector.py new file mode 100644 index 0000000000000000000000000000000000000000..b240972b16a301c456d8836809e99b84a82e6af0 --- /dev/null +++ b/training/detectors/base_detector.py @@ -0,0 +1,71 @@ +# author: Zhiyuan Yan +# email: zhiyuanyan@link.cuhk.edu.cn +# date: 2023-0706 +# description: Abstract Class for the Deepfake Detector + +import abc +import torch +import torch.nn as nn +from typing import Union + +class AbstractDetector(nn.Module, metaclass=abc.ABCMeta): + """ + All deepfake detectors should subclass this class. + """ + def __init__(self, config=None, load_param: Union[bool, str] = False): + """ + config: (dict) + configurations for the model + load_param: (False | True | Path(str)) + False Do not read; True Read the default path; Path Read the required path + """ + super().__init__() + + @abc.abstractmethod + def features(self, data_dict: dict) -> torch.tensor: + """ + Returns the features from the backbone given the input data. 
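+        Concrete detectors typically implement this as a thin wrapper,
+        e.g. returning self.backbone.features(data_dict['image']).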
+ """ + pass + + @abc.abstractmethod + def forward(self, data_dict: dict, inference=False) -> dict: + """ + Forward pass through the model, returning the prediction dictionary. + """ + pass + + @abc.abstractmethod + def classifier(self, features: torch.tensor) -> torch.tensor: + """ + Classifies the features into classes. + """ + pass + + @abc.abstractmethod + def build_backbone(self, config): + """ + Builds the backbone of the model. + """ + pass + + @abc.abstractmethod + def build_loss(self, config): + """ + Builds the loss function for the model. + """ + pass + + @abc.abstractmethod + def get_losses(self, data_dict: dict, pred_dict: dict) -> dict: + """ + Returns the losses for the model. + """ + pass + + @abc.abstractmethod + def get_train_metrics(self, data_dict: dict, pred_dict: dict) -> dict: + """ + Returns the training metrics for the model. + """ + pass diff --git a/training/detectors/efficientnetb4_detector.py b/training/detectors/efficientnetb4_detector.py new file mode 100644 index 0000000000000000000000000000000000000000..43d104e40b56a28d8116e37629370e1dd50b2a33 --- /dev/null +++ b/training/detectors/efficientnetb4_detector.py @@ -0,0 +1,114 @@ +''' +# author: Zhiyuan Yan +# email: zhiyuanyan@link.cuhk.edu.cn +# date: 2023-0706 +# description: Class for the EfficientDetector + +Functions in the Class are summarized as: +1. __init__: Initialization +2. build_backbone: Backbone-building +3. build_loss: Loss-function-building +4. features: Feature-extraction +5. classifier: Classification +6. get_losses: Loss-computation +7. get_train_metrics: Training-metrics-computation +8. get_test_metrics: Testing-metrics-computation +9. forward: Forward-propagation + +Reference: +@inproceedings{tan2019efficientnet, + title={Efficientnet: Rethinking model scaling for convolutional neural networks}, + author={Tan, Mingxing and Le, Quoc}, + booktitle={International conference on machine learning}, + pages={6105--6114}, + year={2019}, + organization={PMLR} +} +''' + +import os +import datetime +import logging +import numpy as np +from sklearn import metrics +from typing import Union +from collections import defaultdict + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from torch.nn import DataParallel +from torch.utils.tensorboard import SummaryWriter + +from metrics.base_metrics_class import calculate_metrics_for_train + +from .base_detector import AbstractDetector +from detectors import DETECTOR +from networks import BACKBONE +from loss import LOSSFUNC +import random + +logger = logging.getLogger(__name__) + +@DETECTOR.register_module(module_name='efficientnetb4') +class EfficientDetector(AbstractDetector): + def __init__(self, config): + super().__init__() + self.config = config + self.backbone = self.build_backbone(config) + self.loss_func = self.build_loss(config) + + def build_backbone(self, config): + # prepare the backbone + backbone_class = BACKBONE[config['backbone_name']] + model_config = config['backbone_config'] + model_config['pretrained'] = self.config['pretrained'] + backbone = backbone_class(model_config) + if config['pretrained'] != 'None': + logger.info('Load pretrained model successfully!') + else: + logger.info('No pretrained model.') + return backbone + return backbone + + def build_loss(self, config): + # prepare the loss function + loss_class = LOSSFUNC[config['loss_func']] + loss_func = loss_class() + return loss_func + + def features(self, data_dict: dict) -> torch.tensor: + x = 
self.backbone.features(data_dict['image']) + return x + + def classifier(self, features: torch.tensor) -> torch.tensor: + return self.backbone.classifier(features) + + def get_losses(self, data_dict: dict, pred_dict: dict) -> dict: + label = data_dict['label'] + pred = pred_dict['cls'] + loss = self.loss_func(pred, label) + loss_dict = {'overall': loss} + return loss_dict + + def get_train_metrics(self, data_dict: dict, pred_dict: dict) -> dict: + label = data_dict['label'] + pred = pred_dict['cls'] + # compute metrics for batch data + auc, eer, acc, ap = calculate_metrics_for_train(label.detach(), pred.detach()) + metric_batch_dict = {'acc': acc, 'auc': auc, 'eer': eer, 'ap': ap} + return metric_batch_dict + + def forward(self, data_dict: dict, inference=False) -> dict: + # get the features by backbone + features = self.features(data_dict) + # get the prediction by classifier + pred = self.classifier(features) + # get the probability of the pred + prob = torch.softmax(pred, dim=1)[:, 1] + # build the prediction dict for each output + pred_dict = {'cls': pred, 'prob': prob, 'feat': features} + + return pred_dict + diff --git a/training/detectors/multi_attention_detector.py b/training/detectors/multi_attention_detector.py new file mode 100644 index 0000000000000000000000000000000000000000..a044d44b04df344e1074b6bec023be0959e0c96d --- /dev/null +++ b/training/detectors/multi_attention_detector.py @@ -0,0 +1,473 @@ +""" +# author: Kangran ZHAO +# email: kangranzhao@link.cuhk.edu.cn +# date: 2024-0401 +# description: Class for the Multi-attention Detector + +Functions in the Class are summarized as: +1. __init__: Initialization +2. build_backbone: Backbone-building +3. build_loss: Loss-function-building +4. features: Feature-extraction +5. classifier: Classification +6. get_losses: Loss-computation +7. get_train_metrics: Training-metrics-computation +8. get_test_metrics: Testing-metrics-computation +9. 
forward: Forward-propagation + +Reference: +@INPROCEEDINGS{9577592, + author={Zhao, Hanqing and Wei, Tianyi and Zhou, Wenbo and Zhang, Weiming and Chen, Dongdong and Yu, Nenghai}, + booktitle={2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + title={Multi-attentional Deepfake Detection}, + year={2021}, + volume={}, + number={}, + pages={2185-2194}, + keywords={Measurement;Semantics;Feature extraction;Forgery;Pattern recognition;Feeds;Task analysis}, + doi={10.1109/CVPR46437.2021.00222} + } + +Codes are modified based on GitHub repo https://github.com/yoctta/multiple-attention +""" + +import random + +import kornia +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from detectors import DETECTOR +from loss import LOSSFUNC +from metrics.base_metrics_class import calculate_metrics_for_train +from networks import BACKBONE +from sklearn import metrics + +from .base_detector import AbstractDetector + + +@DETECTOR.register_module(module_name='multi_attention') +class MultiAttentionDetector(AbstractDetector): + def __init__(self, config): + super().__init__() + self.config = config + self.block_layer = {"b1": 1, "b2": 5, "b3": 9, "b4": 15, "b5": 21, "b6": 29, "b7": 31} + self.mid_dim = config["mid_dim"] + self.backbone = self.build_backbone(config) + self.loss_func = self.build_loss(config) + self.batch_cnt = 0 + + with torch.no_grad(): + layer_outputs = self.features({"image": torch.zeros(1, 3, config["resolution"], config["resolution"])}) + + self.feature_layer = config["feature_layer"] + self.attention_layer = config["attention_layer"] + self.num_classes = config["backbone_config"]["num_classes"] + self.num_shallow_features = layer_outputs[self.feature_layer].shape[1] + self.num_attention_features = layer_outputs[self.attention_layer].shape[1] + self.num_final_features = layer_outputs["final"].shape[1] + self.num_attentions = config["num_attentions"] + + self.AGDA = AGDA(kernel_size=config["AGDA"]["kernel_size"], + dilation=config["AGDA"]["dilation"], + sigma=config["AGDA"]["sigma"], + threshold=config["AGDA"]["threshold"], + zoom=config["AGDA"]["zoom"], + scale_factor=config["AGDA"]["scale_factor"], + noise_rate=config["AGDA"]["noise_rate"]) + + self.attention_generation = AttentionMap(self.num_attention_features, self.num_attentions) + self.attention_pooling = AttentionPooling() + self.texture_enhance = TextureEnhanceV1(self.num_shallow_features, self.num_attentions) # Todo + self.num_enhanced_features = self.texture_enhance.output_features + self.num_features_d = self.texture_enhance.output_features_d + self.projection_local = nn.Sequential(nn.Linear(self.num_attentions * self.num_enhanced_features, self.mid_dim), + nn.Hardswish(), + nn.Linear(self.mid_dim, self.mid_dim), + nn.Hardswish()) + self.projection_final = nn.Sequential(nn.Linear(self.num_final_features, self.mid_dim), + nn.Hardswish()) + self.ensemble_classifier_fc = nn.Sequential(nn.Linear(self.mid_dim * 2, self.mid_dim), + nn.Hardswish(), + nn.Linear(self.mid_dim, self.num_classes)) + self.dropout = nn.Dropout(config["dropout_rate"], inplace=True) + self.dropout_final = nn.Dropout(config["dropout_rate_final"], inplace=True) + + def build_backbone(self, config): + backbone_class = BACKBONE[config['backbone_name']] + model_config = config['backbone_config'] + model_config['pretrained'] = self.config.get('pretrained', None) + backbone = backbone_class(model_config) + + return backbone + + def build_loss(self, config): + cls_loss_class = 
LOSSFUNC[config["loss_func"]["cls_loss"]] + ril_loss_class = LOSSFUNC[config["loss_func"]["ril_loss"]] + cls_loss_func = cls_loss_class() + ril_loss_func = ril_loss_class(M=config["num_attentions"], + N=config["loss_func"]["ril_params"]["N"], + alpha=config["loss_func"]["ril_params"]["alpha"], + alpha_decay=config["loss_func"]["ril_params"]["alpha_decay"], + decay_batch=config["batch_per_epoch"], + inter_margin=config["loss_func"]["ril_params"]["inter_margin"], + intra_margin=config["loss_func"]["ril_params"]["intra_margin"]) + + return {"cls": cls_loss_func, "ril": ril_loss_func, "weights": config["loss_func"]["weights"]} + + def features(self, data_dict: dict) -> torch.tensor: + x = data_dict["image"] + layer_output = {} + for name, module in self.backbone.efficientnet.named_children(): + if name == "_avg_pooling": + layer_output["final"] = x + break + elif name != "_blocks": + x = module(x) + else: + for i in range(len(module)): + x = module[i](x) + if i == self.block_layer["b1"]: + layer_output["b1"] = x + elif i == self.block_layer["b2"]: + layer_output["b2"] = x + elif i == self.block_layer["b3"]: + layer_output["b3"] = x + elif i == self.block_layer["b4"]: + layer_output["b4"] = x + elif i == self.block_layer["b5"]: + layer_output["b5"] = x + elif i == self.block_layer["b6"]: + layer_output["b6"] = x + elif i == self.block_layer["b7"]: + layer_output["b7"] = x + + x = F.adaptive_avg_pool2d(x, (1, 1)) + x = x.view(x.size(0), -1) + layer_output["logit"] = self.backbone.last_layer(x) + + return layer_output + + def classifier(self, features: torch.tensor) -> torch.tensor: + pass # do not overwrite this, since classifier structure has been written in self.forward() + + def get_losses(self, data_dict: dict, pred_dict: dict) -> dict: + if self.batch_cnt <= self.config["backbone_nEpochs"] * self.config["batch_per_epoch"]: + label = data_dict["label"] + pred = pred_dict["cls"] + ce_loss = self.loss_func["cls"](pred, label) + + return {"overall": ce_loss, "ce_loss": ce_loss} + else: + label = data_dict["label"] + pred = pred_dict["cls"] + feature_maps_d = pred_dict["feature_maps_d"] + attention_maps = pred_dict["attentions"] + + ce_loss = self.loss_func["cls"](pred, label) + ril_loss = self.loss_func["ril"](feature_maps_d, attention_maps, label) + weights = self.loss_func["weights"] + over_all_loss = weights[0] * ce_loss + weights[1] * ril_loss + + return {"overall": over_all_loss, "ce_loss": ce_loss, "ril_loss": ril_loss} + + def get_train_metrics(self, data_dict: dict, pred_dict: dict) -> dict: + label = data_dict['label'] + pred = pred_dict['cls'] + auc, eer, acc, ap = calculate_metrics_for_train(label.detach(), pred.detach()) + metric_batch_dict = {'acc': acc, 'auc': auc, 'eer': eer, 'ap': ap} + + return metric_batch_dict + + def get_train_metrics(self, data_dict: dict, pred_dict: dict) -> dict: + label = data_dict['label'] + pred = pred_dict['cls'] + auc, eer, acc, ap = calculate_metrics_for_train(label.detach(), pred.detach()) + metric_batch_dict = {'acc': acc, 'auc': auc, 'eer': eer, 'ap': ap} + + return metric_batch_dict + + def forward(self, data_dict: dict, inference=False) -> dict: + self.batch_cnt += 1 + if self.batch_cnt <= self.config["backbone_nEpochs"] * self.config["batch_per_epoch"]: + layer_output = self.features(data_dict) + pred = layer_output["logit"] + prob = torch.softmax(pred, dim=1)[:, 1] + pred_dict = {"cls": pred, + "prob": prob, + "feat": layer_output["final"]} + + else: + if not inference: # use AGDA when training + with torch.no_grad(): + layer_output = 
self.features(data_dict) + raw_attentions = layer_output[self.attention_layer] + attention_maps = self.attention_generation(raw_attentions) + data_dict["image"], _ = self.AGDA.agda(data_dict["image"], attention_maps) + + # Get Attention Maps + layer_output = self.features(data_dict) + raw_attentions = layer_output[self.attention_layer] + attention_maps = self.attention_generation(raw_attentions) + + # Get Textural Feature Matrix P + shallow_features = layer_output[self.feature_layer] + enhanced_features, feature_maps_d = self.texture_enhance(shallow_features, attention_maps) + textural_feature_matrix_p = self.attention_pooling(enhanced_features, attention_maps) + B, M, N = textural_feature_matrix_p.size() + feature_matrix = self.dropout(textural_feature_matrix_p).view(B, -1) + feature_matrix = self.projection_local(feature_matrix) + + # Get Global Feature G + final = layer_output["final"] + attention_maps2 = attention_maps.sum(dim=1, keepdim=True) # [B, 1, H_A, W_A] + final = self.attention_pooling(final, attention_maps2, norm=1).squeeze(1) # [B, C_F] + final = self.projection_final(final) + final = F.hardswish(final) + + # Get the Prediction by Ensemble Classifier + feature_matrix = torch.cat((feature_matrix, final), dim=1) # [B, 2 * mid_dim] + pred = self.ensemble_classifier_fc(feature_matrix) # [B, 2] + + # Get probability + prob = torch.softmax(pred, dim=1)[:, 1] + + pred_dict = {"cls": pred, + "prob": prob, + "feat": layer_output["final"], + "attentions": attention_maps, + "feature_maps_d": feature_maps_d} + + return pred_dict + + +class AttentionMap(nn.Module): + def __init__(self, in_channels, num_attention): + super(AttentionMap, self).__init__() + self.register_buffer('mask', torch.zeros([1, 1, 24, 24])) + self.mask[0, 0, 2:-2, 2:-2] = 1 + self.num_attentions = num_attention + self.conv_extract = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1) + self.bn1 = nn.BatchNorm2d(in_channels) + self.conv2 = nn.Conv2d(in_channels, num_attention, kernel_size=1, bias=False) + self.bn2 = nn.BatchNorm2d(num_attention) + + def forward(self, x): + """ + Convert deep feature to attention map + Args: + x: extracted features + Returns: + attention_maps: conventionally 4 attention maps + """ + if self.num_attentions == 0: + return torch.ones([x.shape[0], 1, 1, 1], device=x.device) + + x = self.conv_extract(x) + x = self.bn1(x) + x = F.relu(x, inplace=True) + x = self.conv2(x) + x = self.bn2(x) + x = F.elu(x) + 1 + mask = F.interpolate(self.mask, (x.shape[2], x.shape[3]), mode='nearest') + + return x * mask + + +class AttentionPooling(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, features, attentions, norm=2): + """ + Bilinear Attention Pooing, when used for + Args: + features: [Tensor in [B, C_F, H_F, W_F]] extracted feature maps, either shallow ones or deep ones ??? + attentions: [Tensor in [B, M, H, W]] attention maps, conventionally 4 attention maps (M = 4) + norm: [int, default=2] 1 for deep features, 2 for shallow features + Returns: + feature_matrix: [Tensor in [B, M, C_F] or [B, M, 1]] P (shallow feature) or G (deep feature) ??? 
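+
+        Example (illustrative shapes only; concrete sizes depend on the configured layers):
+            shallow BAP (norm=2): features [B, C', H', W'] with attentions [B, M, H_A, W_A]
+                resized to (H', W') -> einsum('imjk,injk->imn') -> P in [B, M, C'], then L2-normalised
+            deep BAP (norm=1): "final" features [B, C_F, H_F, W_F] with the summed attention map [B, 1, H_A, W_A]
+                -> G in [B, 1, C_F], divided by the attention mass sum(attentions) + 1e-8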
+ """ + feature_size = features.size()[-2:] + attention_size = attentions.size()[-2:] + if feature_size != attention_size: + attentions = F.interpolate(attentions, size=feature_size, mode='bilinear', align_corners=True) + + if len(features.shape) == 4: + # In TextureEnhanceV1, in accordance with paper + feature_matrix = torch.einsum('imjk,injk->imn', attentions, features) # [B, M, C_F] + else: + # In TextureEnhanceV2 + feature_matrix = torch.einsum('imjk,imnjk->imn', attentions, features) + + if norm == 1: # Used for deep feature BAP + w = torch.sum(attentions + 1e-8, dim=(2, 3)).unsqueeze(-1) + feature_matrix /= w + elif norm == 2: # Used for shallow feature BAP + feature_matrix = F.normalize(feature_matrix, p=2, dim=-1) + + return feature_matrix + + +class TextureEnhanceV1(nn.Module): + def __init__(self, num_features, num_attentions): + super().__init__() + # self.output_features=num_features + self.output_features = num_features * 4 + self.output_features_d = num_features + self.conv0 = nn.Conv2d(num_features, num_features, 1) + self.conv1 = nn.Conv2d(num_features, num_features, 3, padding=1) + self.bn1 = nn.BatchNorm2d(num_features) + self.conv2 = nn.Conv2d(num_features * 2, num_features, 3, padding=1) + self.bn2 = nn.BatchNorm2d(2 * num_features) + self.conv3 = nn.Conv2d(num_features * 3, num_features, 3, padding=1) + self.bn3 = nn.BatchNorm2d(3 * num_features) + self.conv_last = nn.Conv2d(num_features * 4, num_features * 4, 1) + self.bn4 = nn.BatchNorm2d(4 * num_features) + self.bn_last = nn.BatchNorm2d(num_features * 4) + + def forward(self, feature_maps, attention_maps=(1, 1)): + """ + Texture Enhancement Block V1, in accordance with description in paper + 1. Local average pooling. + 2. Residual local features. + 3. Dense Net + Args: + feature_maps: [Tensor in [B, C', H', W']] extracted shallow features + attention_maps: [Tensor in [B, M, H_A, W_A]] calculated attention maps, or + [Tuple with two float elements] local average grid scale, + used for conduct local average pooling, local patch size is decided by attention map size. 
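+                (illustrative: attention_maps=(0.25, 0.25) on a [B, C', 64, 64] input gives
+                attention_size=(16, 16), i.e. local averaging over 4x4 patches)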
+ Returns: + feature_maps: [Tensor in [B, C_1, H_1, W_1]] enhanced feature maps + feature_maps_d: [Tensor in [B, C', H_A, W_A]] textural information + + """ + B, N, H, W = feature_maps.shape + if type(attention_maps) == tuple: + attention_size = (int(H * attention_maps[0]), int(W * attention_maps[1])) + else: + attention_size = (attention_maps.shape[2], attention_maps.shape[3]) + feature_maps_d = F.adaptive_avg_pool2d(feature_maps, attention_size) + feature_maps = feature_maps - F.interpolate(feature_maps_d, (feature_maps.shape[2], feature_maps.shape[3]), + mode='nearest') + feature_maps0 = self.conv0(feature_maps) + feature_maps1 = self.conv1(F.relu(self.bn1(feature_maps0), inplace=True)) + feature_maps1_ = torch.cat([feature_maps0, feature_maps1], dim=1) + feature_maps2 = self.conv2(F.relu(self.bn2(feature_maps1_), inplace=True)) + feature_maps2_ = torch.cat([feature_maps1_, feature_maps2], dim=1) + feature_maps3 = self.conv3(F.relu(self.bn3(feature_maps2_), inplace=True)) + feature_maps3_ = torch.cat([feature_maps2_, feature_maps3], dim=1) + feature_maps = self.bn_last(self.conv_last(F.relu(self.bn4(feature_maps3_), inplace=True))) + return feature_maps, feature_maps_d + + +class TextureEnhanceV2(nn.Module): + def __init__(self, num_features, num_attentions): + super().__init__() + self.output_features = num_features + self.output_features_d = num_features + self.conv_extract = nn.Conv2d(num_features, num_features, 3, padding=1) + self.conv0 = nn.Conv2d(num_features * num_attentions, num_features * num_attentions, 5, padding=2, + groups=num_attentions) + self.conv1 = nn.Conv2d(num_features * num_attentions, num_features * num_attentions, 3, padding=1, + groups=num_attentions) + self.bn1 = nn.BatchNorm2d(num_features * num_attentions) + self.conv2 = nn.Conv2d(num_features * 2 * num_attentions, num_features * num_attentions, 3, padding=1, + groups=num_attentions) + self.bn2 = nn.BatchNorm2d(2 * num_features * num_attentions) + self.conv3 = nn.Conv2d(num_features * 3 * num_attentions, num_features * num_attentions, 3, padding=1, + groups=num_attentions) + self.bn3 = nn.BatchNorm2d(3 * num_features * num_attentions) + self.conv_last = nn.Conv2d(num_features * 4 * num_attentions, num_features * num_attentions, 1, + groups=num_attentions) + self.bn4 = nn.BatchNorm2d(4 * num_features * num_attentions) + self.bn_last = nn.BatchNorm2d(num_features * num_attentions) + + self.M = num_attentions + + def cat(self, a, b): + B, C, H, W = a.shape + c = torch.cat([a.reshape(B, self.M, -1, H, W), b.reshape(B, self.M, -1, H, W)], dim=2).reshape(B, -1, H, W) + return c + + def forward(self, feature_maps, attention_maps=(1, 1)): + """ + Args: + feature_maps: [Tensor in [B, N, H, W]] extracted feature maps from shallow layer + attention_maps: [Tensor in [B, M, H_A, W_A] or float of (H_ratio, W_ratio)] either extracted attention maps + or average pooling down-sampling ratio + Returns: + feature_maps, feature_maps_d: [Tensor in [B, M, N, H, W], Tensor in [B, N, H, W]] feature maps after dense + network and non-textural feature map D + """ + B, N, H, W = feature_maps.shape + if type(attention_maps) == tuple: + attention_size = (int(H * attention_maps[0]), int(W * attention_maps[1])) + else: + attention_size = (attention_maps.shape[2], attention_maps.shape[3]) + feature_maps = self.conv_extract(feature_maps) + feature_maps_d = F.adaptive_avg_pool2d(feature_maps, attention_size) + if feature_maps.size(2) > feature_maps_d.size(2): + feature_maps = feature_maps - F.interpolate(feature_maps_d, 
(feature_maps.shape[2], feature_maps.shape[3]), + mode='nearest') + attention_maps = ( + torch.tanh(F.interpolate(attention_maps.detach(), (H, W), mode='bilinear', align_corners=True))).unsqueeze( + 2) if type(attention_maps) != tuple else 1 + feature_maps = feature_maps.unsqueeze(1) + feature_maps = (feature_maps * attention_maps).reshape(B, -1, H, W) + feature_maps0 = self.conv0(feature_maps) + feature_maps1 = self.conv1(F.relu(self.bn1(feature_maps0), inplace=True)) + feature_maps1_ = self.cat(feature_maps0, feature_maps1) + feature_maps2 = self.conv2(F.relu(self.bn2(feature_maps1_), inplace=True)) + feature_maps2_ = self.cat(feature_maps1_, feature_maps2) + feature_maps3 = self.conv3(F.relu(self.bn3(feature_maps2_), inplace=True)) + feature_maps3_ = self.cat(feature_maps2_, feature_maps3) + feature_maps = F.relu(self.bn_last(self.conv_last(F.relu(self.bn4(feature_maps3_), inplace=True))), + inplace=True) + feature_maps = feature_maps.reshape(B, -1, N, H, W) + return feature_maps, feature_maps_d + + +class AGDA(nn.Module): + def __init__(self, kernel_size, dilation, sigma, threshold, zoom, scale_factor, noise_rate): + super().__init__() + self.kernel_size = kernel_size + self.dilation = dilation + self.sigma = sigma + self.noise_rate = noise_rate + self.scale_factor = scale_factor + self.threshold = threshold + self.zoom = zoom + self.filter = kornia.filters.GaussianBlur2d((self.kernel_size, self.kernel_size), (self.sigma, self.sigma)) + + def mod_func(self, x): + threshold = random.uniform(*self.threshold) if type(self.threshold) == list else self.threshold + zoom = random.uniform(*self.zoom) if type(self.zoom) == list else self.zoom + bottom = torch.sigmoid((torch.tensor(0.) - threshold) * zoom) + + return (torch.sigmoid((x - threshold) * zoom) - bottom) / (1 - bottom) + + def soft_drop2(self, x, attention_map): + with torch.no_grad(): + attention_map = self.mod_func(attention_map) + B, C, H, W = x.size() + xs = F.interpolate(x, scale_factor=self.scale_factor, mode='bilinear', align_corners=True) + xs = self.filter(xs) + xs += torch.randn_like(xs) * self.noise_rate + xs = F.interpolate(xs, (H, W), mode='bilinear', align_corners=True) + x = x * (1 - attention_map) + xs * attention_map + return x + + def agda(self, X, attention_map): + with torch.no_grad(): + attention_weight = torch.sum(attention_map, dim=(2, 3)) + attention_map = F.interpolate(attention_map, (X.size(2), X.size(3)), mode="bilinear", align_corners=True) + attention_weight = torch.sqrt(attention_weight + 1) + index = torch.distributions.categorical.Categorical(attention_weight).sample() + index1 = index.view(-1, 1, 1, 1).repeat(1, 1, X.size(2), X.size(3)) + attention_map = torch.gather(attention_map, 1, index1) + atten_max = torch.max(attention_map.view(attention_map.shape[0], 1, -1), 2)[0] + 1e-8 + attention_map = attention_map / atten_max.view(attention_map.shape[0], 1, 1, 1) + + return self.soft_drop2(X, attention_map), index diff --git a/training/detectors/resnet34_detector.py b/training/detectors/resnet34_detector.py new file mode 100644 index 0000000000000000000000000000000000000000..97dcae7941178aef036cfd45752dca5505848c1a --- /dev/null +++ b/training/detectors/resnet34_detector.py @@ -0,0 +1,113 @@ +''' +# author: Zhiyuan Yan +# email: zhiyuanyan@link.cuhk.edu.cn +# date: 2023-0706 +# description: Class for the ResnetDetector + +Functions in the Class are summarized as: +1. __init__: Initialization +2. build_backbone: Backbone-building +3. build_loss: Loss-function-building +4. features: Feature-extraction +5. 
classifier: Classification +6. get_losses: Loss-computation +7. get_train_metrics: Training-metrics-computation +8. get_test_metrics: Testing-metrics-computation +9. forward: Forward-propagation + +Reference: +@inproceedings{wang2020cnn, + title={CNN-generated images are surprisingly easy to spot... for now}, + author={Wang, Sheng-Yu and Wang, Oliver and Zhang, Richard and Owens, Andrew and Efros, Alexei A}, + booktitle={Proceedings of the IEEE/CVF conference on computer vision and pattern recognition}, + pages={8695--8704}, + year={2020} +} + +Notes: +We chose to use ResNet-34 as the backbone instead of ResNet-50 because the number of parameters in ResNet-34 is relatively similar to that of Xception. This similarity allows us to make a more meaningful and fair comparison between different architectures. +''' + +import os +import datetime +import logging +import numpy as np +from sklearn import metrics +from typing import Union +from collections import defaultdict + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from torch.nn import DataParallel +from torch.utils.tensorboard import SummaryWriter + +from metrics.base_metrics_class import calculate_metrics_for_train + +from .base_detector import AbstractDetector +from detectors import DETECTOR +from networks import BACKBONE +from loss import LOSSFUNC + +logger = logging.getLogger(__name__) + +@DETECTOR.register_module(module_name='resnet34') +class ResnetDetector(AbstractDetector): + def __init__(self, config): + super().__init__() + self.config = config + self.backbone = self.build_backbone(config) + self.loss_func = self.build_loss(config) + + def build_backbone(self, config): + # prepare the backbone + backbone_class = BACKBONE[config['backbone_name']] + model_config = config['backbone_config'] + backbone = backbone_class(model_config) + #FIXME: current load pretrained weights only from the backbone, not here + # # if donot load the pretrained weights, fail to get good results + # state_dict = torch.load(config['pretrained']) + # state_dict = {'resnet.'+k:v for k, v in state_dict.items() if 'fc' not in k} + # backbone.load_state_dict(state_dict, False) + # logger.info('Load pretrained model successfully!') + return backbone + + def build_loss(self, config): + # prepare the loss function + loss_class = LOSSFUNC[config['loss_func']] + loss_func = loss_class() + return loss_func + + def features(self, data_dict: dict) -> torch.tensor: + return self.backbone.features(data_dict['image']) + + def classifier(self, features: torch.tensor) -> torch.tensor: + return self.backbone.classifier(features) + + def get_losses(self, data_dict: dict, pred_dict: dict) -> dict: + label = data_dict['label'] + pred = pred_dict['cls'] + loss = self.loss_func(pred, label) + loss_dict = {'overall': loss} + return loss_dict + + def get_train_metrics(self, data_dict: dict, pred_dict: dict) -> dict: + label = data_dict['label'] + pred = pred_dict['cls'] + # compute metrics for batch data + auc, eer, acc, ap = calculate_metrics_for_train(label.detach(), pred.detach()) + metric_batch_dict = {'acc': acc, 'auc': auc, 'eer': eer, 'ap': ap} + return metric_batch_dict + + def forward(self, data_dict: dict, inference=False) -> dict: + # get the features by backbone + features = self.features(data_dict) + # get the prediction by classifier + pred = self.classifier(features) + # get the probability of the pred + prob = torch.softmax(pred, dim=1)[:, 1] + # build the prediction dict for each output + pred_dict = {'cls': pred, 
'prob': prob, 'feat': features} + return pred_dict + diff --git a/training/detectors/ucf_detector.py b/training/detectors/ucf_detector.py new file mode 100644 index 0000000000000000000000000000000000000000..c45ddcf07522f5b62329e54eb04f90ec90251f00 --- /dev/null +++ b/training/detectors/ucf_detector.py @@ -0,0 +1,466 @@ +''' +# author: Zhiyuan Yan +# email: zhiyuanyan@link.cuhk.edu.cn +# date: 2023-0706 +# description: Class for the UCFDetector + +Functions in the Class are summarized as: +1. __init__: Initialization +2. build_backbone: Backbone-building +3. build_loss: Loss-function-building +4. features: Feature-extraction +5. classifier: Classification +6. get_losses: Loss-computation +7. get_train_metrics: Training-metrics-computation +8. get_test_metrics: Testing-metrics-computation +9. forward: Forward-propagation + +Reference: +@article{yan2023ucf, + title={UCF: Uncovering Common Features for Generalizable Deepfake Detection}, + author={Yan, Zhiyuan and Zhang, Yong and Fan, Yanbo and Wu, Baoyuan}, + journal={arXiv preprint arXiv:2304.13949}, + year={2023} +} +''' + +import os +import datetime +import logging +import random +import numpy as np +from sklearn import metrics +from typing import Union +from collections import defaultdict + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from torch.nn import DataParallel +from torch.utils.tensorboard import SummaryWriter + +from metrics.base_metrics_class import calculate_metrics_for_train + +from .base_detector import AbstractDetector +from detectors import DETECTOR +from networks import BACKBONE +from loss import LOSSFUNC + +logger = logging.getLogger(__name__) + +@DETECTOR.register_module(module_name='ucf') +class UCFDetector(AbstractDetector): + def __init__(self, config): + super().__init__() + self.config = config + self.num_classes = config['backbone_config']['num_classes'] + self.encoder_feat_dim = config['encoder_feat_dim'] + self.half_fingerprint_dim = self.encoder_feat_dim//2 + + self.encoder_f = self.build_backbone(config) + self.encoder_c = self.build_backbone(config) + + self.loss_func = self.build_loss(config) + self.prob, self.label = [], [] + self.correct, self.total = 0, 0 + + # basic function + self.lr = nn.LeakyReLU(inplace=True) + self.do = nn.Dropout(0.2) + self.pool = nn.AdaptiveAvgPool2d(1) + + # conditional gan + self.con_gan = Conditional_UNet() + + # head + specific_task_number = len(config['train_dataset']) + 1 # default: 5 in FF++ + self.head_spe = Head( + in_f=self.half_fingerprint_dim, + hidden_dim=self.encoder_feat_dim, + out_f=specific_task_number + ) + self.head_sha = Head( + in_f=self.half_fingerprint_dim, + hidden_dim=self.encoder_feat_dim, + out_f=self.num_classes + ) + self.block_spe = Conv2d1x1( + in_f=self.encoder_feat_dim, + hidden_dim=self.half_fingerprint_dim, + out_f=self.half_fingerprint_dim + ) + self.block_sha = Conv2d1x1( + in_f=self.encoder_feat_dim, + hidden_dim=self.half_fingerprint_dim, + out_f=self.half_fingerprint_dim + ) + + def build_backbone(self, config): + # prepare the backbone + backbone_class = BACKBONE[config['backbone_name']] + model_config = config['backbone_config'] + backbone = backbone_class(model_config) + # if donot load the pretrained weights, fail to get good results + state_dict = torch.load(config['pretrained']) + for name, weights in state_dict.items(): + if 'pointwise' in name: + state_dict[name] = weights.unsqueeze(-1).unsqueeze(-1) + state_dict = {k:v for k, v in state_dict.items() if 'fc' not in k} + 
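+        # Descriptive note: the loop above expands 2-D 'pointwise' (1x1 conv) weights to
+        # [C_out, C_in, 1, 1] so they match nn.Conv2d, and 'fc' entries are dropped because
+        # the classification heads are defined in this detector; load_state_dict below runs
+        # with strict=False so the remaining (newly initialised) layers keep their init.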
backbone.load_state_dict(state_dict, False) + logger.info('Load pretrained model successfully!') + return backbone + + def build_loss(self, config): + cls_loss_class = LOSSFUNC[config['loss_func']['cls_loss']] + spe_loss_class = LOSSFUNC[config['loss_func']['spe_loss']] + con_loss_class = LOSSFUNC[config['loss_func']['con_loss']] + rec_loss_class = LOSSFUNC[config['loss_func']['rec_loss']] + cls_loss_func = cls_loss_class() + spe_loss_func = spe_loss_class() + con_loss_func = con_loss_class(margin=3.0) + rec_loss_func = rec_loss_class() + loss_func = { + 'cls': cls_loss_func, + 'spe': spe_loss_func, + 'con': con_loss_func, + 'rec': rec_loss_func, + } + return loss_func + + def features(self, data_dict: dict) -> torch.tensor: + cat_data = data_dict['image'] + # encoder + f_all = self.encoder_f.features(cat_data) + c_all = self.encoder_c.features(cat_data) + feat_dict = {'forgery': f_all, 'content': c_all} + return feat_dict + + def classifier(self, features: torch.tensor) -> torch.tensor: + # classification, multi-task + # split the features into the specific and common forgery + f_spe = self.block_spe(features) + f_share = self.block_sha(features) + return f_spe, f_share + + def get_losses(self, data_dict: dict, pred_dict: dict) -> dict: + if 'label_spe' in data_dict and 'recontruction_imgs' in pred_dict: + return self.get_train_losses(data_dict, pred_dict) + else: # test mode + return self.get_test_losses(data_dict, pred_dict) + + def get_train_losses(self, data_dict: dict, pred_dict: dict) -> dict: + # get combined, real, fake imgs + cat_data = data_dict['image'] + real_img, fake_img = cat_data.chunk(2, dim=0) + # get the reconstruction imgs + reconstruction_image_1, \ + reconstruction_image_2, \ + self_reconstruction_image_1, \ + self_reconstruction_image_2 \ + = pred_dict['recontruction_imgs'] + # get label + label = data_dict['label'] + label_spe = data_dict['label_spe'] + # get pred + pred = pred_dict['cls'] + pred_spe = pred_dict['cls_spe'] + + # 1. classification loss for common features + loss_sha = self.loss_func['cls'](pred, label) + + # 2. classification loss for specific features + loss_spe = self.loss_func['spe'](pred_spe, label_spe) + + # 3. reconstruction loss + self_loss_reconstruction_1 = self.loss_func['rec'](fake_img, self_reconstruction_image_1) + self_loss_reconstruction_2 = self.loss_func['rec'](real_img, self_reconstruction_image_2) + cross_loss_reconstruction_1 = self.loss_func['rec'](fake_img, reconstruction_image_2) + cross_loss_reconstruction_2 = self.loss_func['rec'](real_img, reconstruction_image_1) + loss_reconstruction = \ + self_loss_reconstruction_1 + self_loss_reconstruction_2 + \ + cross_loss_reconstruction_1 + cross_loss_reconstruction_2 + + # 4. constrative loss + common_features = pred_dict['feat'] + specific_features = pred_dict['feat_spe'] + loss_con = self.loss_func['con'](common_features, specific_features, label_spe) + + # 5. 
total loss + loss = loss_sha + 0.1*loss_spe + 0.3*loss_reconstruction + 0.05*loss_con + loss_dict = { + 'overall': loss, + 'common': loss_sha, + 'specific': loss_spe, + 'reconstruction': loss_reconstruction, + 'contrastive': loss_con, + } + return loss_dict + + def get_test_losses(self, data_dict: dict, pred_dict: dict) -> dict: + # get label + label = data_dict['label'] + # get pred + pred = pred_dict['cls'] + # for test mode, only classification loss for common features + loss = self.loss_func['cls'](pred, label) + loss_dict = {'common': loss} + return loss_dict + + def get_train_metrics(self, data_dict: dict, pred_dict: dict) -> dict: + def get_accracy(label, output): + _, prediction = torch.max(output, 1) # argmax + correct = (prediction == label).sum().item() + accuracy = correct / prediction.size(0) + return accuracy + + # get pred and label + label = data_dict['label'] + pred = pred_dict['cls'] + label_spe = data_dict['label_spe'] + pred_spe = pred_dict['cls_spe'] + + # compute metrics for batch data + auc, eer, acc, ap = calculate_metrics_for_train(label.detach(), pred.detach()) + acc_spe = get_accracy(label_spe.detach(), pred_spe.detach()) + metric_batch_dict = {'acc': acc, 'acc_spe': acc_spe, 'auc': auc, 'eer': eer, 'ap': ap} + # we dont compute the video-level metrics for training + return metric_batch_dict + + def forward(self, data_dict: dict, inference=False) -> dict: + # split the features into the content and forgery + features = self.features(data_dict) + forgery_features, content_features = features['forgery'], features['content'] + # get the prediction by classifier (split the common and specific forgery) + f_spe, f_share = self.classifier(forgery_features) + + if inference: + # inference only consider share loss + out_sha, sha_feat = self.head_sha(f_share) + out_spe, spe_feat = self.head_spe(f_spe) + prob_sha = torch.softmax(out_sha, dim=1)[:, 1] + self.prob.append( + prob_sha + .detach() + .squeeze() + .cpu() + .numpy() + ) + self.label.append( + data_dict['label'] + .detach() + .squeeze() + .cpu() + .numpy() + ) + # deal with acc + _, prediction_class = torch.max(out_sha, 1) + common_label = (data_dict['label'] >= 1) + correct = (prediction_class == common_label).sum().item() + self.correct += correct + self.total += data_dict['label'].size(0) + + pred_dict = {'cls': out_sha, 'feat': sha_feat} + return pred_dict + + bs = f_share.size(0) + # using idx aug in the training mode + aug_idx = random.random() + if aug_idx < 0.7: + # real + idx_list = list(range(0, bs//2)) + random.shuffle(idx_list) + f_share[0: bs//2] = f_share[idx_list] + # fake + idx_list = list(range(bs//2, bs)) + random.shuffle(idx_list) + f_share[bs//2: bs] = f_share[idx_list] + + # concat spe and share to obtain new_f_all + f_all = torch.cat((f_spe, f_share), dim=1) + + # reconstruction loss + f2, f1 = f_all.chunk(2, dim=0) + c2, c1 = content_features.chunk(2, dim=0) + + # ==== self reconstruction ==== # + # f1 + c1 -> f11, f11 + c1 -> near~I1 + self_reconstruction_image_1 = self.con_gan(f1, c1) + + # f2 + c2 -> f2, f2 + c2 -> near~I2 + self_reconstruction_image_2 = self.con_gan(f2, c2) + + # ==== cross combine ==== # + reconstruction_image_1 = self.con_gan(f1, c2) + reconstruction_image_2 = self.con_gan(f2, c1) + + # head for spe and sha + out_spe, spe_feat = self.head_spe(f_spe) + out_sha, sha_feat = self.head_sha(f_share) + + # get the probability of the pred + prob_sha = torch.softmax(out_sha, dim=1)[:, 1] + prob_spe = torch.softmax(out_spe, dim=1)[:, 1] + + # build the prediction dict for each 
output + pred_dict = { + 'cls': out_sha, + 'prob': prob_sha, + 'feat': sha_feat, + 'cls_spe': out_spe, + 'prob_spe': prob_spe, + 'feat_spe': spe_feat, + 'feat_content': content_features, + 'recontruction_imgs': ( + reconstruction_image_1, + reconstruction_image_2, + self_reconstruction_image_1, + self_reconstruction_image_2 + ) + } + return pred_dict + +def sn_double_conv(in_channels, out_channels): + return nn.Sequential( + nn.utils.spectral_norm( + nn.Conv2d(in_channels, in_channels, 3, padding=1)), + nn.utils.spectral_norm( + nn.Conv2d(in_channels, out_channels, 3, padding=1, stride=2)), + nn.LeakyReLU(0.2, inplace=True) + ) + +def r_double_conv(in_channels, out_channels): + return nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(out_channels, out_channels, 3, padding=1), + nn.ReLU(inplace=True) + ) + +class AdaIN(nn.Module): + def __init__(self, eps=1e-5): + super().__init__() + self.eps = eps + # self.l1 = nn.Linear(num_classes, in_channel*4, bias=True) #bias is good :) + + def c_norm(self, x, bs, ch, eps=1e-7): + # assert isinstance(x, torch.cuda.FloatTensor) + x_var = x.var(dim=-1) + eps + x_std = x_var.sqrt().view(bs, ch, 1, 1) + x_mean = x.mean(dim=-1).view(bs, ch, 1, 1) + return x_std, x_mean + + def forward(self, x, y): + assert x.size(0)==y.size(0) + size = x.size() + bs, ch = size[:2] + x_ = x.view(bs, ch, -1) + y_ = y.reshape(bs, ch, -1) + x_std, x_mean = self.c_norm(x_, bs, ch, eps=self.eps) + y_std, y_mean = self.c_norm(y_, bs, ch, eps=self.eps) + out = ((x - x_mean.expand(size)) / x_std.expand(size)) \ + * y_std.expand(size) + y_mean.expand(size) + return out + +class Conditional_UNet(nn.Module): + + def init_weight(self, std=0.2): + for m in self.modules(): + cn = m.__class__.__name__ + if cn.find('Conv') != -1: + m.weight.data.normal_(0., std) + elif cn.find('Linear') != -1: + m.weight.data.normal_(1., std) + m.bias.data.fill_(0) + + def __init__(self): + super(Conditional_UNet, self).__init__() + + self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) + self.maxpool = nn.MaxPool2d(2) + self.dropout = nn.Dropout(p=0.3) + #self.dropout_half = HalfDropout(p=0.3) + + self.adain3 = AdaIN() + self.adain2 = AdaIN() + self.adain1 = AdaIN() + + self.dconv_up3 = r_double_conv(512, 256) + self.dconv_up2 = r_double_conv(256, 128) + self.dconv_up1 = r_double_conv(128, 64) + + self.conv_last = nn.Conv2d(64, 3, 1) + self.up_last = nn.Upsample(scale_factor=4, mode='bilinear', align_corners=True) + self.activation = nn.Tanh() + #self.init_weight() + + def forward(self, c, x): # c is the style and x is the content + x = self.adain3(x, c) + x = self.upsample(x) + x = self.dropout(x) + x = self.dconv_up3(x) + c = self.upsample(c) + c = self.dropout(c) + c = self.dconv_up3(c) + + x = self.adain2(x, c) + x = self.upsample(x) + x = self.dropout(x) + x = self.dconv_up2(x) + c = self.upsample(c) + c = self.dropout(c) + c = self.dconv_up2(c) + + x = self.adain1(x, c) + x = self.upsample(x) + x = self.dropout(x) + x = self.dconv_up1(x) + + x = self.conv_last(x) + out = self.up_last(x) + + return self.activation(out) + +class MLP(nn.Module): + def __init__(self, in_f, hidden_dim, out_f): + super(MLP, self).__init__() + self.pool = nn.AdaptiveAvgPool2d(1) + self.mlp = nn.Sequential(nn.Linear(in_f, hidden_dim), + nn.LeakyReLU(inplace=True), + nn.Linear(hidden_dim, hidden_dim), + nn.LeakyReLU(inplace=True), + nn.Linear(hidden_dim, out_f),) + + def forward(self, x): + x = self.pool(x) + x = self.mlp(x) + return x + 
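+
+# Helper heads used by UCFDetector above (MLP is defined here but not referenced by the
+# detector in this file): Conv2d1x1 implements block_spe / block_sha, the 1x1-conv stacks
+# that split encoder features into specific and shared forgery branches, and Head is the
+# pool+MLP classifier used as head_spe / head_sha.
+#
+# Minimal shape sketch (illustrative; assumes encoder_feat_dim=512, so half_fingerprint_dim=256,
+# and an 8x8 encoder feature map -- the real values come from the YAML config):
+#   f_all                            : [B, 512, 8, 8]  from encoder_f.features(x)
+#   Conv2d1x1(512, 256, 256)(f_all)  -> f_spe / f_share in [B, 256, 8, 8]
+#   Head(256, 512, 2)(f_share)       -> (out_sha [B, 2], sha_feat [B, 256])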
+class Conv2d1x1(nn.Module): + def __init__(self, in_f, hidden_dim, out_f): + super(Conv2d1x1, self).__init__() + self.conv2d = nn.Sequential(nn.Conv2d(in_f, hidden_dim, 1, 1), + nn.LeakyReLU(inplace=True), + nn.Conv2d(hidden_dim, hidden_dim, 1, 1), + nn.LeakyReLU(inplace=True), + nn.Conv2d(hidden_dim, out_f, 1, 1),) + + def forward(self, x): + x = self.conv2d(x) + return x + +class Head(nn.Module): + def __init__(self, in_f, hidden_dim, out_f): + super(Head, self).__init__() + self.do = nn.Dropout(0.2) + self.pool = nn.AdaptiveAvgPool2d(1) + self.mlp = nn.Sequential(nn.Linear(in_f, hidden_dim), + nn.LeakyReLU(inplace=True), + nn.Linear(hidden_dim, out_f),) + + def forward(self, x): + bs = x.size()[0] + x_feat = self.pool(x).view(bs, -1) + x = self.mlp(x_feat) + x = self.do(x) + return x, x_feat diff --git a/training/detectors/utils/iid_api.py b/training/detectors/utils/iid_api.py new file mode 100644 index 0000000000000000000000000000000000000000..d86c0e5ea9f6a360e704b8ec38c14d01d5c0a17b --- /dev/null +++ b/training/detectors/utils/iid_api.py @@ -0,0 +1,268 @@ +from __future__ import print_function +from __future__ import division +import torch +import torch.nn as nn +from torch.nn import Parameter +import torch.distributed as dist +import math + + +def l2_norm(input, axis=1): + norm = torch.norm(input, p=2, dim=axis, keepdim=True) + output = torch.div(input, norm) + return output + + +def calc_logits(embeddings, kernel): + """ calculate original logits + """ + embeddings = l2_norm(embeddings, axis=1) + kernel_norm = l2_norm(kernel, axis=0) + cos_theta = torch.mm(embeddings, kernel_norm) + cos_theta = cos_theta.clamp(-1, 1) # for numerical stability + with torch.no_grad(): + origin_cos = cos_theta.clone() + return cos_theta, origin_cos + + +@torch.no_grad() +def all_gather_tensor(input_tensor): + """ allgather tensor (difference size in 0-dim) from all workers + """ + world_size = dist.get_world_size() + + tensor_size = torch.tensor([input_tensor.shape[0]], dtype=torch.int64).cuda() + tensor_size_list = [torch.ones_like(tensor_size) for _ in range(world_size)] + dist.all_gather(tensor_list=tensor_size_list, tensor=tensor_size, async_op=False) + max_size = torch.cat(tensor_size_list, dim=0).max() + + padded = torch.empty(max_size.item(), *input_tensor.shape[1:], dtype=input_tensor.dtype).cuda() + padded[:input_tensor.shape[0]] = input_tensor + padded_list = [torch.ones_like(padded) for _ in range(world_size)] + dist.all_gather(tensor_list=padded_list, tensor=padded, async_op=False) + + slices = [] + for ts, t in zip(tensor_size_list, padded_list): + slices.append(t[:ts.item()]) + return torch.cat(slices, dim=0) + + +def calc_top1_acc(original_logits, label,ddp=False): + """ + Compute the top1 accuracy during training + :param original_logits: logits w/o margin, [bs, C] + :param label: labels [bs] + :return: acc in all gpus + """ + assert (original_logits.size()[0] == label.size()[0]) + + with torch.no_grad(): + _, max_index = torch.max(original_logits, dim=1, keepdim=False) # local max logit + count = (max_index == label).sum() + if ddp: + dist.all_reduce(count, dist.ReduceOp.SUM) + + return count.item() / (original_logits.size()[0] * dist.get_world_size()) + else: + return count.item() / (original_logits.size()[0]) + +def l2_norm(input, axis=1): + norm = torch.norm(input, p=2, dim=axis, keepdim=True) + output = torch.div(input, norm) + return output + + +class FC_ddp2(nn.Module): + """ + Implement of (CVPR2021 Consistent Instance False Positive Improves Fairness in Face Recognition) + No 
model parallel is used + """ + + def __init__(self, + in_features, + out_features, + scale=64.0, + margin=0.4, + mode='cosface', + use_cifp=False, + reduction='mean', + ddp=False): + """ Args: + in_features: size of each input features + out_features: size of each output features + scale: norm of input feature + margin: margin + """ + super(FC_ddp2, self).__init__() + self.in_features = in_features + self.out_features = out_features # num of classes + self.scale = scale + self.margin = margin + self.mode = mode + self.use_cifp = use_cifp + self.kernel = Parameter(torch.Tensor(in_features, out_features)) + self.ddp = ddp + nn.init.normal_(self.kernel, std=0.01) + + self.criteria = torch.nn.CrossEntropyLoss(reduction=reduction) + + def apply_margin(self, target_cos_theta): + assert self.mode in ['cosface', 'arcface'], 'Please check the mode' + if self.mode == 'arcface': + cos_m = math.cos(self.margin) + sin_m = math.sin(self.margin) + theta = math.cos(math.pi - self.margin) + sinmm = math.sin(math.pi - self.margin) * self.margin + sin_theta = torch.sqrt(1.0 - torch.pow(target_cos_theta, 2)) + cos_theta_m = target_cos_theta * cos_m - sin_theta * sin_m + target_cos_theta_m = torch.where( + target_cos_theta > theta, cos_theta_m, target_cos_theta - sinmm) + elif self.mode == 'cosface': + target_cos_theta_m = target_cos_theta - self.margin + + return target_cos_theta_m + + def forward(self, embeddings, label, return_logits=False): + """ + + :param embeddings: local gpu [bs, 512] + :param label: local labels [bs] + :param return_logits: bool + :return: + loss: computed local loss, w/wo CIFP + acc: local accuracy in one gpu + output: local logits with margins, with gradients, scaled, [bs, C]. + """ + sample_num = embeddings.size(0) + + if not self.use_cifp: + cos_theta, origin_cos = calc_logits(embeddings, self.kernel) + target_cos_theta = cos_theta[torch.arange(0, sample_num), label].view(-1, 1) + target_cos_theta_m = self.apply_margin(target_cos_theta) + cos_theta.scatter_(1, label.view(-1, 1).long(), target_cos_theta_m) + else: + cos_theta, origin_cos = calc_logits(embeddings, self.kernel) + cos_theta_, _ = calc_logits(embeddings, self.kernel.detach()) + + mask = torch.zeros_like(cos_theta) # [bs,C] + mask.scatter_(1, label.view(-1, 1).long(), 1.0) # one-hot label / gt mask + + tmp_cos_theta = cos_theta - 2 * mask + tmp_cos_theta_ = cos_theta_ - 2 * mask + + target_cos_theta = cos_theta[torch.arange(0, sample_num), label].view(-1, 1) + target_cos_theta_ = cos_theta_[torch.arange(0, sample_num), label].view(-1, 1) + + target_cos_theta_m = self.apply_margin(target_cos_theta) + + far = 1 / (self.out_features - 1) # ru+ value + # far = 1e-5 + + topk_mask = torch.greater(tmp_cos_theta, target_cos_theta) + topk_sum = torch.sum(topk_mask.to(torch.int32)) + if self.ddp: + dist.all_reduce(topk_sum) + far_rank = math.ceil(far * (sample_num * (self.out_features - 1) * dist.get_world_size() - topk_sum)) + cos_theta_neg_topk = torch.topk((tmp_cos_theta - 2 * topk_mask.to(torch.float32)).flatten(), + k=far_rank)[0] # [far_rank] + cos_theta_neg_topk = all_gather_tensor(cos_theta_neg_topk.contiguous()) # top k across all gpus + cos_theta_neg_th = torch.topk(cos_theta_neg_topk, k=far_rank)[0][-1] + + cond = torch.mul(torch.bitwise_not(topk_mask), torch.greater(tmp_cos_theta, cos_theta_neg_th)) + cos_theta_neg_topk = torch.mul(cond.to(torch.float32), tmp_cos_theta) + cos_theta_neg_topk_ = torch.mul(cond.to(torch.float32), tmp_cos_theta_) + cond = torch.greater(target_cos_theta_m, cos_theta_neg_topk) + + 
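+            # Descriptive note (CIFP step): where a hard negative already beats the margined
+            # target (cond is False), the kernel-detached cosine is substituted below; the
+            # surviving negatives are squared (z^p with p=2), averaged over the active count,
+            # scaled by (1 + detached target cosine) and subtracted from the target logit.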
cos_theta_neg_topk = torch.where(cond, cos_theta_neg_topk, cos_theta_neg_topk_) + cos_theta_neg_topk = torch.pow(cos_theta_neg_topk, 2) # F = z^p = cos^2 + times = torch.sum(torch.greater(cos_theta_neg_topk, 0).to(torch.float32), dim=1, keepdim=True) + times = torch.where(torch.greater(times, 0), times, torch.ones_like(times)) + cos_theta_neg_topk = torch.sum(cos_theta_neg_topk, dim=1, keepdim=True) / times # ri+/ru+ + + target_cos_theta_m = target_cos_theta_m - (1 + target_cos_theta_) * cos_theta_neg_topk + cos_theta.scatter_(1, label.view(-1, 1).long(), target_cos_theta_m) + + output = cos_theta * self.scale + loss = self.criteria(output, label) + acc = calc_top1_acc(origin_cos * self.scale, label,self.ddp) + + if return_logits: + return loss, acc, output + + return loss, acc + + +class FC_ddp(nn.Module): + """ + Implement of (CVPR2021 Consistent Instance False Positive Improves Fairness in Face Recognition) + No model parallel is used + """ + + def __init__(self, + in_features, + out_features, + scale=8.0, + margin=0.2, + mode='cosface', + use_cifp=False, + reduction='mean'): + """ Args: + in_features: size of each input features + out_features: size of each output features + scale: norm of input feature + margin: margin + """ + super(FC_ddp, self).__init__() + self.in_features = in_features + self.out_features = out_features # num of classes + self.scale = scale + self.margin = margin + self.mode = mode + self.use_cifp = use_cifp + # self.kernel = Parameter(torch.Tensor(in_features, out_features)) + # nn.init.normal_(self.kernel, std=0.01) + + self.criteria = torch.nn.CrossEntropyLoss(reduction=reduction) + self.sig = torch.nn.Sigmoid() + + def apply_margin(self, target_cos_theta): + assert self.mode in ['cosface', 'arcface'], 'Please check the mode' + if self.mode == 'arcface': + cos_m = math.cos(self.margin) + sin_m = math.sin(self.margin) + theta = math.cos(math.pi - self.margin) + sinmm = math.sin(math.pi - self.margin) * self.margin + sin_theta = torch.sqrt(1.0 - torch.pow(target_cos_theta, 2)) + cos_theta_m = target_cos_theta * cos_m - sin_theta * sin_m + target_cos_theta_m = torch.where( + target_cos_theta > theta, cos_theta_m, target_cos_theta - sinmm) + elif self.mode == 'cosface': + target_cos_theta_m = target_cos_theta - self.margin + + return target_cos_theta_m + + def forward(self, embeddings, label, return_logits=False): + """ + + :param embeddings: local gpu [bs, 512] + :param label: local labels [bs] + :param return_logits: bool + :return: + loss: computed local loss, w/wo CIFP + acc: local accuracy in one gpu + output: local logits with margins, with gradients, scaled, [bs, C]. 
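+
+        Note: unlike FC_ddp2, this simplified head applies a sigmoid directly to `embeddings`
+        (treated as per-class scores; no kernel or CIFP machinery) and, as written, returns
+        only the margin-scaled cross-entropy loss -- `return_logits` is ignored.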
+ """ + sample_num = embeddings.size(0) + cos_theta = self.sig(embeddings) + target_cos_theta = cos_theta[torch.arange(0, sample_num), label].view(-1, 1) + # target_cos_theta_m = target_cos_theta - self.margin + target_cos_theta = target_cos_theta - self.margin + # cos_theta.scatter_(1, label.view(-1, 1).long(), target_cos_theta_m) + out = cos_theta.clone() + out.scatter_(1, label.view(-1, 1).long(), target_cos_theta) + + out = out * self.scale + + loss = self.criteria(out, label) + + return loss diff --git a/training/detectors/utils/lsad_api.py b/training/detectors/utils/lsad_api.py new file mode 100644 index 0000000000000000000000000000000000000000..445698614ba6d2aabc47d8625cc181cec2e1fd93 --- /dev/null +++ b/training/detectors/utils/lsad_api.py @@ -0,0 +1,83 @@ +import random + +import torch + + +def augment_domains(self, groups_feature_maps): + # Helper Functions + def hard_example_interpolation(z_i, hard_example, lambda_1): + return z_i + lambda_1 * (hard_example - z_i) + + def hard_example_extrapolation(z_i, mean_latent, lambda_2): + return z_i + lambda_2 * (z_i - mean_latent) + + def add_gaussian_noise(z_i, sigma, lambda_3): + epsilon = torch.randn_like(z_i) * sigma + return z_i + lambda_3 * epsilon + + def difference_transform(z_i, z_j, z_k, lambda_4): + return z_i + lambda_4 * (z_j - z_k) + + def distance(z_i, z_j): + return torch.norm(z_i - z_j) + + domain_number = len(groups_feature_maps[0]) + + # Calculate the mean latent vector for each domain across all groups + domain_means = [] + for domain_idx in range(domain_number): + all_samples_in_domain = torch.cat([group[domain_idx] for group in groups_feature_maps], dim=0) + domain_mean = torch.mean(all_samples_in_domain, dim=0) + domain_means.append(domain_mean) + + # Identify the hard example for each domain across all groups + hard_examples = [] + for domain_idx in range(domain_number): + all_samples_in_domain = torch.cat([group[domain_idx] for group in groups_feature_maps], dim=0) + distances = torch.tensor([distance(z, domain_means[domain_idx]) for z in all_samples_in_domain]) + hard_example = all_samples_in_domain[torch.argmax(distances)] + hard_examples.append(hard_example) + + augmented_groups = [] + + for group_feature_maps in groups_feature_maps: + augmented_domains = [] + + for domain_idx, domain_feature_maps in enumerate(group_feature_maps): + # Choose a random augmentation + augmentations = [ + lambda z: hard_example_interpolation(z, hard_examples[domain_idx], random.random()), + lambda z: hard_example_extrapolation(z, domain_means[domain_idx], random.random()), + lambda z: add_gaussian_noise(z, random.random(), random.random()), + lambda z: difference_transform(z, domain_feature_maps[0], domain_feature_maps[1], random.random()) + ] + chosen_aug = random.choice(augmentations) + augmented = torch.stack([chosen_aug(z) for z in domain_feature_maps]) + augmented_domains.append(augmented) + + augmented_domains = torch.stack(augmented_domains) + augmented_groups.append(augmented_domains) + + return torch.stack(augmented_groups) + + +def mixup_in_latent_space(self, data): + # data shape: [batchsize, num_domains, 3, 256, 256] + bs, num_domains, _, _, _ = data.shape + + # Initialize an empty tensor for mixed data + mixed_data = torch.zeros_like(data) + + # For each sample in the batch + for i in range(bs): + # Step 1: Generate a shuffled index list for the domains + shuffled_idxs = torch.randperm(num_domains) + + # Step 2: Choose random alpha between 0.5 and 2, then sample lambda from beta distribution + alpha = torch.rand(1) * 
1.5 + 0.5 # random alpha between 0.5 and 2 + lambda_ = torch.distributions.beta.Beta(alpha, alpha).sample().to(data.device) + + # Step 3: Perform mixup using the shuffled indices + mixed_data[i] = lambda_ * data[i] + (1 - lambda_) * data[i, shuffled_idxs] + + return mixed_data \ No newline at end of file diff --git a/training/detectors/utils/sladd_api.py b/training/detectors/utils/sladd_api.py new file mode 100644 index 0000000000000000000000000000000000000000..84174ea864f816f504be66563f209c62c29f4862 --- /dev/null +++ b/training/detectors/utils/sladd_api.py @@ -0,0 +1,668 @@ +import torch +import math +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +import yaml +from PIL import Image +import cv2 +from torchvision import transforms as T +from skimage import measure +from skimage.transform import PiecewiseAffineTransform, warp +from torch.autograd import Variable +from scipy.ndimage import binary_erosion, binary_dilation + +from dataset.pair_dataset import pairDataset +from dataset.utils.color_transfer import color_transfer +from dataset.utils.faceswap_utils_sladd import blendImages as alpha_blend_fea +from dataset.utils import faceswap + + + +class Block(nn.Module): + def __init__(self, in_filters, out_filters, reps, strides=1, start_with_relu=True, grow_first=True): + super(Block, self).__init__() + + if out_filters != in_filters or strides != 1: + self.skip = nn.Conv2d(in_filters, out_filters, + 1, stride=strides, bias=False) + self.skipbn = nn.BatchNorm2d(out_filters) + else: + self.skip = None + + self.relu = nn.ReLU(inplace=True) + rep = [] + + filters = in_filters + if grow_first: # whether the number of filters grows first + rep.append(self.relu) + rep.append(SeparableConv2d(in_filters, out_filters, + 3, stride=1, padding=1, bias=False)) + rep.append(nn.BatchNorm2d(out_filters)) + filters = out_filters + + for i in range(reps - 1): + rep.append(self.relu) + rep.append(SeparableConv2d(filters, filters, + 3, stride=1, padding=1, bias=False)) + rep.append(nn.BatchNorm2d(filters)) + + if not grow_first: + rep.append(self.relu) + rep.append(SeparableConv2d(in_filters, out_filters, + 3, stride=1, padding=1, bias=False)) + rep.append(nn.BatchNorm2d(out_filters)) + + if not start_with_relu: + rep = rep[1:] + else: + rep[0] = nn.ReLU(inplace=False) + + if strides != 1: + rep.append(nn.MaxPool2d(3, strides, 1)) + self.rep = nn.Sequential(*rep) + + def forward(self, inp): + x = self.rep(inp) + + if self.skip is not None: + skip = self.skip(inp) + skip = self.skipbn(skip) + else: + skip = inp + + x += skip + return x + +class SeparableConv2d(nn.Module): + def __init__(self, c_in, c_out, ks, stride=1, padding=0, dilation=1, bias=False): + super(SeparableConv2d, self).__init__() + self.c = nn.Conv2d(c_in, c_in, ks, stride, padding, dilation, groups=c_in, bias=bias) + self.pointwise = nn.Conv2d(c_in, c_out, 1, 1, 0, 1, 1, bias=bias) + + def forward(self, x): + x = self.c(x) + x = self.pointwise(x) + return x + +class Xception_SLADDSyn(nn.Module): + """ + Xception optimized for the ImageNet dataset, as specified in + https://arxiv.org/pdf/1610.02357.pdf + """ + + def __init__(self, num_classes=2, num_region=7, num_type=2, num_mag=1, inc=6): + """ Constructor + Args: + num_classes: number of classes + """ + super(Xception_SLADDSyn, self).__init__() + self.num_region = num_region + self.num_type = num_type + self.num_mag = num_mag + dropout = 0.5 + + # Entry flow + self.iniconv = nn.Conv2d(inc, 32, 3, 2, 0, bias=False) + # self.conv1 = nn.Conv2d(inc, 32, 3, 2, 0, 
bias=False) + self.bn1 = nn.BatchNorm2d(32) + self.relu = nn.ReLU(inplace=True) + + self.conv2 = nn.Conv2d(32, 64, 3, bias=False) + self.bn2 = nn.BatchNorm2d(64) + # do relu here + + self.block1 = Block( + 64, 128, 2, 2, start_with_relu=False, grow_first=True) + self.block2 = Block( + 128, 256, 2, 2, start_with_relu=True, grow_first=True) + self.block3 = Block( + 256, 728, 2, 2, start_with_relu=True, grow_first=True) + + # middle flow + self.block4 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + self.block5 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + self.block6 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + self.block7 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + + self.block8 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + self.block9 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + self.block10 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + self.block11 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + + # Exit flow + self.block12 = Block( + 728, 1024, 2, 2, start_with_relu=True, grow_first=False) + + self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1) + self.bn3 = nn.BatchNorm2d(1536) + + # do relu here + self.conv4 = SeparableConv2d(1536, 2048, 3, 1, 1) + self.bn4 = nn.BatchNorm2d(2048) + self.fc_region = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(2048, num_region)) + self.fc_type = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(2048, num_type)) + self.fc_mag = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(2048, num_mag)) + + def fea_part1_0(self, x): + x = self.iniconv(x) + x = self.bn1(x) + x = self.relu(x) + + return x + + def fea_part1_1(self, x): + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + + return x + + def fea_part1(self, x): + x = self.iniconv(x) + x = self.bn1(x) + x = self.relu(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + + return x + + def fea_part2(self, x): + x = self.block1(x) + x = self.block2(x) + x = self.block3(x) + + return x + + def fea_part3(self, x): + x = self.block4(x) + x = self.block5(x) + x = self.block6(x) + x = self.block7(x) + + return x + + def fea_part4(self, x): + x = self.block8(x) + x = self.block9(x) + x = self.block10(x) + x = self.block11(x) + x = self.block12(x) + + return x + + def fea_part5(self, x): + x = self.conv3(x) + x = self.bn3(x) + x = self.relu(x) + + x = self.conv4(x) + x = self.bn4(x) + + return x + + def features(self, input): + x = self.fea_part1(input) + + x = self.fea_part2(x) + x = self.fea_part3(x) + x = self.fea_part4(x) + + x = self.fea_part5(x) + return x + + def classifier(self, features): + x = self.relu(features) + + x = F.adaptive_avg_pool2d(x, (1, 1)) + x = x.view(x.size(0), -1) + out = self.last_linear(x) + return out, x + + def forward(self, input): + x = self.features(input) + x = self.relu(x) + x = F.adaptive_avg_pool2d(x, (1, 1)) + x = x.view(x.size(0), -1) + + region_num = self.fc_region(x) + type_num = self.fc_type(x) + mag = self.fc_mag(x) + + return region_num, type_num, mag + + +def mask_postprocess(mask): + def blur_mask(mask): + blur_k = 2 * np.random.randint(1, 10) - 1 + + # kernel = np.ones((blur_k+1, blur_k+1), np.uint8) + # mask = cv2.erode(mask, kernel) + + mask = cv2.GaussianBlur(mask, (blur_k, blur_k), 0) + + return mask + + # random erode/dilate + prob = np.random.rand() + if prob < 0.3: + erode_k = 2 * np.random.randint(1, 10) + 1 + kernel = np.ones((erode_k, erode_k), np.uint8) + mask = 
cv2.erode(mask, kernel) + elif prob < 0.6: + erode_k = 2 * np.random.randint(1, 10) + 1 + kernel = np.ones((erode_k, erode_k), np.uint8) + mask = cv2.dilate(mask, kernel) + + # random blur + if np.random.rand() < 0.9: + mask = blur_mask(mask) + + return mask + +def xception(num_region=7, num_type=2, num_mag=1, pretrained='imagenet', inc=6): + model = Xception_SLADDSyn(num_region=num_region, num_type=num_type, num_mag=num_mag, inc=inc) + return model + + + +class TransferModel(nn.Module): + """ + Simple transfer learning model that takes an imagenet pretrained model with + a fc layer as base model and retrains a new fc layer for num_out_classes + """ + + def __init__(self, config, num_region=7, num_type=2, num_mag=1, return_fea=False, inc=6): + super(TransferModel, self).__init__() + self.return_fea = return_fea + def return_pytorch04_xception(pretrained=True): + # Raises warning "src not broadcastable to dst" but thats fine + model = xception(num_region=num_region, num_type=num_type, num_mag=num_mag, inc=inc, pretrained=False) + if pretrained: + # Load model in torch 0.4+ + # model.fc = model.last_linear + # del model.last_linear + state_dict = torch.load(config['pretrained']) + print('Loaded pretrained model (ImageNet)....') + for name, weights in state_dict.items(): + if 'pointwise' in name: + state_dict[name] = weights.unsqueeze( + -1).unsqueeze(-1) + model.load_state_dict(state_dict, strict=False) + # model.last_linear = model.fc + # del model.fc + return model + + self.model = return_pytorch04_xception() + # Replace fc + + if inc != 3: + self.model.iniconv = nn.Conv2d(inc, 32, 3, 2, 0, bias=False) + nn.init.xavier_normal(self.model.iniconv.weight.data, gain=0.02) + + def set_trainable_up_to(self, boolean=False, layername="Conv2d_4a_3x3"): + """ + Freezes all layers below a specific layer and sets the following layers + to true if boolean else only the fully connected final layer + :param boolean: + :param layername: depends on lib, for inception e.g. 
Conv2d_4a_3x3 + :return: + """ + # Stage-1: freeze all the layers + if layername is None: + for i, param in self.model.named_parameters(): + param.requires_grad = True + return + else: + for i, param in self.model.named_parameters(): + param.requires_grad = False + if boolean: + # Make all layers following the layername layer trainable + ct = [] + found = False + for name, child in self.model.named_children(): + if layername in ct: + found = True + for params in child.parameters(): + params.requires_grad = True + ct.append(name) + if not found: + raise NotImplementedError('Layer not found, cant finetune!'.format( + layername)) + else: + # Make fc trainable + for param in self.model.last_linear.parameters(): + param.requires_grad = True + + def forward(self, x): + region_num, type_num, mag = self.model(x) + return region_num, type_num, mag + + def features(self, x): + x = self.model.features(x) + return x + + def classifier(self, x): + out, x = self.model.classifier(x) + return out, x + + + +def dist(p1, p2): + return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) + + +def generate_random_mask(mask, res=256): + randwl = np.random.randint(10, 60) + randwr = np.random.randint(10, 60) + randhu = np.random.randint(10, 60) + randhd = np.random.randint(10, 60) + newmask = np.zeros(mask.shape) + mask = np.where(mask > 0.1, 1, 0) + props = measure.regionprops(mask) + if len(props) == 0: + return newmask + center_x, center_y = props[0].centroid + center_x = int(round(center_x)) + center_y = int(round(center_y)) + newmask[max(center_x - randwl, 0):min(center_x + randwr, res - 1), + max(center_y - randhu, 0):min(center_x + randhd, res - 1)] = 1 + newmask *= mask + return newmask + + +def random_deform(mask, nrows, ncols, mean=0, std=10): + h, w = mask.shape[:2] + rows = np.linspace(0, h - 1, nrows).astype(np.int32) + cols = np.linspace(0, w - 1, ncols).astype(np.int32) + rows += np.random.normal(mean, std, size=rows.shape).astype(np.int32) + rows += np.random.normal(mean, std, size=cols.shape).astype(np.int32) + rows, cols = np.meshgrid(rows, cols) + anchors = np.vstack([rows.flat, cols.flat]).T + assert anchors.shape[1] == 2 and anchors.shape[0] == ncols * nrows + deformed = anchors + np.random.normal(mean, std, size=anchors.shape) + np.clip(deformed[:, 0], 0, h - 1, deformed[:, 0]) + np.clip(deformed[:, 1], 0, w - 1, deformed[:, 1]) + + trans = PiecewiseAffineTransform() + trans.estimate(anchors, deformed.astype(np.int32)) + warped = warp(mask, trans) + warped *= mask + blured = cv2.GaussianBlur(warped.astype(float), (5, 5), 3) + return blured + + +def get_five_key(landmarks_68): + # get the five key points by using the landmarks + leye_center = (landmarks_68[36] + landmarks_68[39]) * 0.5 + reye_center = (landmarks_68[42] + landmarks_68[45]) * 0.5 + nose = landmarks_68[33] + lmouth = landmarks_68[48] + rmouth = landmarks_68[54] + leye_left = landmarks_68[36] + leye_right = landmarks_68[39] + reye_left = landmarks_68[42] + reye_right = landmarks_68[45] + out = [tuple(x.astype('int32')) for x in [ + leye_center, reye_center, nose, lmouth, rmouth, leye_left, leye_right, reye_left, reye_right + ]] + return out + + +def remove_eyes(image, landmarks, opt): + ##l: left eye; r: right eye, b: both eye + if opt == 'l': + (x1, y1), (x2, y2) = landmarks[5:7] + elif opt == 'r': + (x1, y1), (x2, y2) = landmarks[7:9] + elif opt == 'b': + (x1, y1), (x2, y2) = landmarks[:2] + else: + print('wrong region') + mask = np.zeros_like(image[..., 0]) + line = cv2.line(mask, (x1, y1), (x2, y2), color=(1), 
thickness=2) + w = dist((x1, y1), (x2, y2)) + dilation = int(w // 4) + if opt != 'b': + dilation *= 4 + line = binary_dilation(line, iterations=dilation) + return line + + +def remove_nose(image, landmarks): + (x1, y1), (x2, y2) = landmarks[:2] + x3, y3 = landmarks[2] + mask = np.zeros_like(image[..., 0]) + x4 = int((x1 + x2) / 2) + y4 = int((y1 + y2) / 2) + line = cv2.line(mask, (x3, y3), (x4, y4), color=(1), thickness=2) + w = dist((x1, y1), (x2, y2)) + dilation = int(w // 4) + line = binary_dilation(line, iterations=dilation) + return line + + +def remove_mouth(image, landmarks): + (x1, y1), (x2, y2) = landmarks[3:5] + mask = np.zeros_like(image[..., 0]) + line = cv2.line(mask, (x1, y1), (x2, y2), color=(1), thickness=2) + w = dist((x1, y1), (x2, y2)) + dilation = int(w // 3) + line = binary_dilation(line, iterations=dilation) + return line + + +def blend_fake_to_real(realimg, real_lmk, fakeimg, fakemask, fake_lmk, deformed_fakemask, type, mag): + # source: fake image + # target: real image + realimg = ((realimg + 1) / 2 * 255).astype(np.uint8) + fakeimg = ((fakeimg + 1) / 2 * 255).astype(np.uint8) + H, W, C = realimg.shape + #由于我们已经做过对齐,这里可以直接用。原代码是做了对齐操作的. 这个src就是fake + aligned_src = fakeimg + src_mask = deformed_fakemask + src_mask = src_mask > 0 # (H, W) + + tgt_mask = np.asarray(src_mask, dtype=np.uint8) + tgt_mask = mask_postprocess(tgt_mask) + + ct_modes = ['rct-m', 'rct-fs', 'avg-align', 'faceswap'] + mode_idx = np.random.randint(len(ct_modes)) + mode = ct_modes[mode_idx] + + if mode != 'faceswap': + c_mask = tgt_mask / 255. + c_mask[c_mask > 0] = 1 + if len(c_mask.shape) < 3: + c_mask = np.expand_dims(c_mask, 2) + src_crop = color_transfer(mode, aligned_src, realimg, c_mask) + else: + c_mask = tgt_mask.copy() + c_mask[c_mask > 0] = 255 + masked_tgt = faceswap.apply_mask(realimg, c_mask) + masked_src = faceswap.apply_mask(aligned_src, c_mask) + src_crop = faceswap.correct_colours(masked_tgt, masked_src, np.array(real_lmk)) + + if tgt_mask.mean() < 0.005 or src_crop.max() == 0: + out_blend = realimg + else: + if type == 0: + out_blend, a_mask = alpha_blend_fea(src_crop, realimg, tgt_mask, + featherAmount=0.2 * np.random.rand()) + elif type == 1: + b_mask = (tgt_mask * 255).astype(np.uint8) + l, t, w, h = cv2.boundingRect(b_mask) + center = (int(l + w / 2), int(t + h / 2)) + out_blend = cv2.seamlessClone(src_crop, realimg, b_mask, center, cv2.NORMAL_CLONE) + else: + out_blend = copy_fake_to_real(realimg, src_crop, tgt_mask, mag) + + return out_blend, tgt_mask + + +def copy_fake_to_real(realimg, fakeimg, mask, mag): + mask = np.expand_dims(mask, 2) + newimg = fakeimg * mask * mag + realimg * (1 - mask) + realimg * mask * (1 - mag) + return newimg + + +class synthesizer(nn.Module): + def __init__(self,config): + super(synthesizer, self).__init__() + self.netG = TransferModel(config=config,num_region=10, num_type=4, num_mag=1, inc=6) + normalize = T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + self.transforms = T.Compose([T.ToTensor(), normalize]) + + def parse(self, img, reg, real_lmk, fakemask): + five_key = get_five_key(real_lmk) + if reg == 0: + mask = remove_eyes(img, five_key, 'l') + elif reg == 1: + mask = remove_eyes(img, five_key, 'r') + elif reg == 2: + mask = remove_eyes(img, five_key, 'b') + elif reg == 3: + mask = remove_nose(img, five_key) + elif reg == 4: + mask = remove_mouth(img, five_key) + elif reg == 5: + mask = remove_nose(img, five_key) + remove_eyes(img, five_key, 'l') + elif reg == 6: + mask = remove_nose(img, five_key) + remove_eyes(img, five_key, 
'r') + elif reg == 7: + mask = remove_nose(img, five_key) + remove_eyes(img, five_key, 'b') + elif reg == 8: + mask = remove_nose(img, five_key) + remove_mouth(img, five_key) + elif reg == 9: + mask = remove_eyes(img, five_key, 'b') + remove_nose(img, five_key) + remove_mouth(img, five_key) + else: + mask = generate_random_mask(fakemask) + mask = random_deform(mask, 5, 5) + return mask * 1.0 + + def get_variable(self, inputs, cuda=False, **kwargs): + if type(inputs) in [list, np.ndarray]: + inputs = torch.Tensor(inputs) + if cuda: + out = Variable(inputs.cuda(), **kwargs) + else: + out = Variable(inputs, **kwargs) + return out + + def calculate(self, logits): + if logits.shape[1] != 1: + probs = F.softmax(logits, dim=-1) + log_prob = F.log_softmax(logits, dim=-1) + entropy = -(log_prob * probs).sum(1, keepdim=False) + action = probs.multinomial(num_samples=1).data + selected_log_prob = log_prob.gather(1, self.get_variable(action, requires_grad=False)) + else: + probs = torch.sigmoid(logits) + log_prob = torch.log(torch.sigmoid(logits)) + entropy = -(log_prob * probs).sum(1, keepdim=False) + action = probs + selected_log_prob = log_prob + return entropy, selected_log_prob[:, 0], action[:, 0] + + def forward(self, img, fake_img, real_lmk, fake_lmk, real_mask, fake_mask, label=None): + # based on pair_dataset, here, img always is real, fake_img always is fake + region_num, type_num, mag = self.netG(torch.cat((img, fake_img), 1)) + reg_etp, reg_log_prob, reg = self.calculate(region_num) + type_etp, type_log_prob, type = self.calculate(type_num) + mag_etp, mag_log_prob, mag = self.calculate(mag) + entropy = reg_etp + type_etp + mag_etp + log_prob = reg_log_prob + type_log_prob + mag_log_prob + newlabel = [] + typelabel = [] + maglabel = [] + magmask = [] + ##################### + alt_img = torch.ones(img.shape) + alt_mask = np.zeros((img.shape[0], 16, 16)) + if label is None: + label=np.zeros(img.shape[0]) + for i in range(img.shape[0]): + imgcp = np.transpose(img[i].cpu().numpy(), (1, 2, 0)).copy() + fake_imgcp = np.transpose(fake_img[i].cpu().numpy(), (1, 2, 0)).copy() + ##only work for real imgs and not do-nothing choice + if label[i] == 0 and type[i] != 3: + mask = self.parse(fake_imgcp, reg[i], fake_lmk[i].cpu().numpy(), + fake_mask[i].cpu().numpy()) + newimg, newmask = blend_fake_to_real(imgcp, real_lmk[i].cpu().numpy(), + fake_imgcp, fake_mask.cpu().numpy(), + fake_lmk[i].cpu().numpy(), mask, type[i], + mag[i].detach().cpu().numpy()) + newimg = self.transforms(Image.fromarray(np.array(newimg, dtype=np.uint8))) + newlabel.append(int(1)) + typelabel.append(int(type[i].cpu().numpy())) + if type[i] == 2: + magmask.append(int(1)) + else: + magmask.append(int(0)) + else: + newimg = self.transforms(Image.fromarray(np.array((imgcp + 1) / 2 * 255, dtype=np.uint8))) + newmask =real_mask[i].squeeze(2)[:,:,0].cpu().numpy() + newlabel.append(int(label[i])) + if label[i] == 0: + typelabel.append(int(3)) + else: + typelabel.append(int(4)) + magmask.append(int(0)) + if newmask is None: + newmask = np.zeros((16, 16)) + newmask = cv2.resize(newmask, (16, 16), interpolation=cv2.INTER_CUBIC) + alt_img[i] = newimg + alt_mask[i] = newmask + + alt_mask = torch.from_numpy(alt_mask.astype(np.float32)).unsqueeze(1) + newlabel = torch.tensor(newlabel) + typelabel = torch.tensor(typelabel) + maglabel = mag + magmask = torch.tensor(magmask) + return log_prob, entropy, alt_img.detach(), alt_mask.detach(), \ + newlabel.detach(), typelabel.detach(), maglabel.detach(), magmask.detach() + + +if __name__ == '__main__': + 
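+    # Minimal smoke test for the synthesizer: load the SLADD-Xception detector
+    # config, build the paired real/fake dataloader, and run forward passes to
+    # obtain the log-probabilities and entropies of the sampled augmentation
+    # actions together with the synthesized images, masks and labels. The
+    # hard-coded Windows path below is machine specific and is assumed to point
+    # at a local DeepfakeBench checkout.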
+ with open(r'H:\code\DeepfakeBench\training\config\detector\sladd_xception.yaml', 'r') as f: + config = yaml.safe_load(f) + syn=synthesizer(config=config).cuda() + config['data_manner'] = 'lmdb' + config['dataset_json_folder'] = 'preprocessing/dataset_json_v3' + config['sample_size']=256 + config['with_mask']=True + config['with_landmark']=True + config['use_data_augmentation']=True + config['data_aug']['rotate_prob']=1 + train_set = pairDataset(config=config, mode='train') + train_data_loader = \ + torch.utils.data.DataLoader( + dataset=train_set, + batch_size=config['train_batchSize'], + shuffle=True, + num_workers=0, + collate_fn=train_set.collate_fn, + ) + from tqdm import tqdm + for iteration, batch in enumerate(tqdm(train_data_loader)): + print(iteration) + imgs,lmks,msks=batch['image'].cuda(),batch['landmark'].cuda(),batch['mask'].cuda() + half = len(imgs) // 2 + img, fake_img, real_lmk, fake_lmk, real_mask, fake_mask = imgs[:half],imgs[half:],lmks[:half],lmks[half:],msks[:half],msks[half:] + log_prob, entropy, new_img, alt_mask, label, type_label, mag_label, mag_mask = \ + syn(img, fake_img, real_lmk, fake_lmk, real_mask, fake_mask) + + if iteration > 10: + break + ... \ No newline at end of file diff --git a/training/detectors/utils/slowfast/__init__.py b/training/detectors/utils/slowfast/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e05c811313e73cc666f973abdad760a01ff2244e --- /dev/null +++ b/training/detectors/utils/slowfast/__init__.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +import os +import sys +current_file_path = os.path.abspath(__file__) +parent_dir = os.path.dirname(os.path.dirname(current_file_path)) +project_root_dir = os.path.dirname(parent_dir) +sys.path.append(parent_dir) +sys.path.append(project_root_dir) + +from slowfast.utils.env import setup_environment + +setup_environment() diff --git a/training/detectors/utils/slowfast/config/__init__.py b/training/detectors/utils/slowfast/config/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8dbe96a785072a24a9bcc4841a1934024f2b06a1 --- /dev/null +++ b/training/detectors/utils/slowfast/config/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. diff --git a/training/detectors/utils/slowfast/config/custom_config.py b/training/detectors/utils/slowfast/config/custom_config.py new file mode 100644 index 0000000000000000000000000000000000000000..8131da2951d8cb629f664b39da4675d0ae5adee5 --- /dev/null +++ b/training/detectors/utils/slowfast/config/custom_config.py @@ -0,0 +1,9 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Add custom configs and default values""" + + +def add_custom_config(_C): + # Add your own customized configs. + pass diff --git a/training/detectors/utils/slowfast/config/defaults(1).py b/training/detectors/utils/slowfast/config/defaults(1).py new file mode 100644 index 0000000000000000000000000000000000000000..083a3c1f31c822143f7a0a68b374ae8b83430b3c --- /dev/null +++ b/training/detectors/utils/slowfast/config/defaults(1).py @@ -0,0 +1,816 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Configs.""" +import yaml +from fvcore.common.config import CfgNode as CfgNodeOri + +from . 
import custom_config +def load_yaml_with_base(text: str, allow_unsafe: bool = False): + """ + Just like `yaml.load(open(filename))`, but inherit attributes from its + `_BASE_`. + Args: + text (str): the file name of the current config. Will be used to + find the base config file. + allow_unsafe (bool): whether to allow loading the config file with + `yaml.unsafe_load`. + Returns: + (dict): the loaded yaml + """ + cfg = yaml.load(text, Loader=yaml.FullLoader) + return cfg +class CfgNode(CfgNodeOri): + def merge_from_str(self, text, allow_unsafe=False): + loaded_cfg = load_yaml_with_base(text, allow_unsafe=allow_unsafe) + loaded_cfg = type(self)(loaded_cfg) + self.merge_from_other_cfg(loaded_cfg) + +# ----------------------------------------------------------------------------- +# Config definition +# ----------------------------------------------------------------------------- +_C = CfgNode() + + +# ---------------------------------------------------------------------------- # +# Batch norm options +# ---------------------------------------------------------------------------- # +_C.BN = CfgNode() + +# Precise BN stats. +_C.BN.USE_PRECISE_STATS = False + +# Number of samples use to compute precise bn. +_C.BN.NUM_BATCHES_PRECISE = 200 + +# Weight decay value that applies on BN. +_C.BN.WEIGHT_DECAY = 0.0 + +# Norm type, options include `batchnorm`, `sub_batchnorm`, `sync_batchnorm` +_C.BN.NORM_TYPE = "batchnorm" + +# Parameter for SubBatchNorm, where it splits the batch dimension into +# NUM_SPLITS splits, and run BN on each of them separately independently. +_C.BN.NUM_SPLITS = 1 + +# Parameter for NaiveSyncBatchNorm3d, where the stats across `NUM_SYNC_DEVICES` +# devices will be synchronized. +_C.BN.NUM_SYNC_DEVICES = 1 + + +# ---------------------------------------------------------------------------- # +# Training options. +# ---------------------------------------------------------------------------- # +_C.TRAIN = CfgNode() + +# If True Train the model, else skip training. +_C.TRAIN.ENABLE = True + +# Dataset. +_C.TRAIN.DATASET = "kinetics" + +# Total mini-batch size. +_C.TRAIN.BATCH_SIZE = 64 + +_C.TRAIN.SPLIT = "train_subset2.pth" +# Evaluate model on test data every eval period epochs. +_C.TRAIN.EVAL_PERIOD = 1 + +# Save model checkpoint every checkpoint period epochs. +_C.TRAIN.CHECKPOINT_PERIOD = 1 + +# Save model checkpoint every checkpoint period iters. +_C.TRAIN.CHECKPOINT_PERIOD_BY_ITER = 500 + + +# Resume training from the latest checkpoint in the output directory. +_C.TRAIN.AUTO_RESUME = True + +# Path to the checkpoint to load the initial weight. +_C.TRAIN.CHECKPOINT_FILE_PATH = "" + +# Checkpoint types include `caffe2` or `pytorch`. +_C.TRAIN.CHECKPOINT_TYPE = "pytorch" + +# If True, perform inflation when loading checkpoint. +_C.TRAIN.CHECKPOINT_INFLATE = False + + +# ---------------------------------------------------------------------------- # +# Testing options +# ---------------------------------------------------------------------------- # +_C.TEST = CfgNode() + +# If True test the model, else skip the testing. +_C.TEST.ENABLE = True + +# Dataset for testing. +_C.TEST.DATASET = "kinetics" + +_C.TEST.SPLIT = "test_subset2.pth" +# Total mini-batch size +_C.TEST.BATCH_SIZE = 8 + +# Path to the checkpoint to load the initial weight. +_C.TEST.CHECKPOINT_FILE_PATH = "" + +# Number of clips to sample from a video uniformly for aggregating the +# prediction results. 
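+# Together with TEST.NUM_SPATIAL_CROPS below, this yields
+# NUM_ENSEMBLE_VIEWS * NUM_SPATIAL_CROPS (10 * 3 = 30 with the defaults here)
+# predictions that are aggregated per test video.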
+_C.TEST.NUM_ENSEMBLE_VIEWS = 10 + +# Number of crops to sample from a frame spatially for aggregating the +# prediction results. +_C.TEST.NUM_SPATIAL_CROPS = 3 + +# Checkpoint types include `caffe2` or `pytorch`. +_C.TEST.CHECKPOINT_TYPE = "pytorch" +# Path to saving prediction results file. +_C.TEST.SAVE_RESULTS_PATH = "" +# ----------------------------------------------------------------------------- +# ResNet options +# ----------------------------------------------------------------------------- +_C.RESNET = CfgNode() + +# Transformation function. +_C.RESNET.TRANS_FUNC = "bottleneck_transform" + +# Number of groups. 1 for ResNet, and larger than 1 for ResNeXt). +_C.RESNET.NUM_GROUPS = 1 + +# Width of each group (64 -> ResNet; 4 -> ResNeXt). +_C.RESNET.WIDTH_PER_GROUP = 64 + +# Apply relu in a inplace manner. +_C.RESNET.INPLACE_RELU = True + +# Apply stride to 1x1 conv. +_C.RESNET.STRIDE_1X1 = False + +# If true, initialize the gamma of the final BN of each block to zero. +_C.RESNET.ZERO_INIT_FINAL_BN = False + +# Number of weight layers. +_C.RESNET.DEPTH = 50 + + +# label of branchs +_C.RESNET.LABELS = ["continus","discontinus"] + +# If the current block has more than NUM_BLOCK_TEMP_KERNEL blocks, use temporal +# kernel of 1 for the rest of the blocks. +_C.RESNET.NUM_BLOCK_TEMP_KERNEL = [[3], [4], [6], [3]] + +# Size of stride on different res stages. +_C.RESNET.SPATIAL_STRIDES = [[1], [2], [2], [2]] + +# Size of dilation on different res stages. +_C.RESNET.SPATIAL_DILATIONS = [[1], [1], [1], [1]] + + +# ----------------------------------------------------------------------------- +# Nonlocal options +# ----------------------------------------------------------------------------- +_C.NONLOCAL = CfgNode() + +# Index of each stage and block to add nonlocal layers. +_C.NONLOCAL.LOCATION = [[[]], [[]], [[]], [[]]] + +# Number of group for nonlocal for each stage. +_C.NONLOCAL.GROUP = [[1], [1], [1], [1]] + +# Instatiation to use for non-local layer. +_C.NONLOCAL.INSTANTIATION = "dot_product" + + +# Size of pooling layers used in Non-Local. +_C.NONLOCAL.POOL = [ + # Res2 + [[1, 2, 2], [1, 2, 2]], + # Res3 + [[1, 2, 2], [1, 2, 2]], + # Res4 + [[1, 2, 2], [1, 2, 2]], + # Res5 + [[1, 2, 2], [1, 2, 2]], +] + +# ----------------------------------------------------------------------------- +# Model options +# ----------------------------------------------------------------------------- +_C.MODEL = CfgNode() + +# Model architecture. +_C.MODEL.ARCH = "slowfast" + +# Model name +_C.MODEL.MODEL_NAME = "SlowFast" + +# The number of classes to predict for the model. +_C.MODEL.NUM_CLASSES = 400 + +# Loss function. +_C.MODEL.LOSS_FUNC = "cross_entropy" + +_C.MODEL.MASK_WEIGHT = 100 + +_C.MODEL.CLASS_WEIGHT = 1 + +# Model architectures that has one single pathway. +_C.MODEL.SINGLE_PATHWAY_ARCH = ["c2d", "i3d", "slow"] + +# Model architectures that has multiple pathways. +_C.MODEL.MULTI_PATHWAY_ARCH = ["slowfast"] + +# Dropout rate before final projection in the backbone. +_C.MODEL.DROPOUT_RATE = 0.5 + +# The std to initialize the fc layer(s). +_C.MODEL.FC_INIT_STD = 0.01 + +# Activation layer for the output head. +_C.MODEL.HEAD_ACT = "softmax" + + +# ----------------------------------------------------------------------------- +# SlowFast options +# ----------------------------------------------------------------------------- +_C.SLOWFAST = CfgNode() + +# Corresponds to the inverse of the channel reduction ratio, $\beta$ between +# the Slow and Fast pathways. 
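+# i.e. beta = 1 / BETA_INV; with the default of 8 the Fast pathway uses 1/8 of
+# the Slow pathway's channel width.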
+_C.SLOWFAST.BETA_INV = 8 + +# Corresponds to the frame rate reduction ratio, $\alpha$ between the Slow and +# Fast pathways. +_C.SLOWFAST.ALPHA = 8 + +# Ratio of channel dimensions between the Slow and Fast pathways. +_C.SLOWFAST.FUSION_CONV_CHANNEL_RATIO = 2 + +# Kernel dimension used for fusing information from Fast pathway to Slow +# pathway. +_C.SLOWFAST.FUSION_KERNEL_SZ = 5 + + +# ----------------------------------------------------------------------------- +# Data options +# ----------------------------------------------------------------------------- +_C.DATA = CfgNode() + +# The path to the data directory. +_C.DATA.PATH_TO_DATA_DIR = "" + +_C.DATA.DATASET = "faceforensics" + +_C.DATA.MODE = "" + +_C.DATA.ADAPTIVE = False + +_C.DATA.SCALE = 1.0 +# The separator used between path and label. +_C.DATA.PATH_LABEL_SEPARATOR = " " + +# Video path prefix if any. +_C.DATA.PATH_PREFIX = "" + +# The spatial crop size of the input clip. +_C.DATA.CROP_SIZE = 224 + +# The number of frames of the input clip. +_C.DATA.NUM_FRAMES = 8 + +_C.DATA.NUM_FRAMES_RANGE = [1,2,3,4,5,6,7,8] + +# The video sampling rate of the input clip. +_C.DATA.SAMPLING_RATE = 8 + +# The mean value of the video raw pixels across the R G B channels. +_C.DATA.MEAN = [0.45, 0.45, 0.45] +# List of input frame channel dimensions. + +_C.DATA.INPUT_CHANNEL_NUM = [3, 3] + +# The std value of the video raw pixels across the R G B channels. +_C.DATA.STD = [0.225, 0.225, 0.225] + +# The spatial augmentation jitter scales for training. +_C.DATA.TRAIN_JITTER_SCALES = [256, 320] + +# The spatial crop size for training. +_C.DATA.TRAIN_CROP_SIZE = 224 + +# The spatial crop size for testing. +_C.DATA.TEST_CROP_SIZE = 256 + +# Input videos may has different fps, convert it to the target video fps before +# frame sampling. +_C.DATA.TARGET_FPS = 30 + +# Decoding backend, options include `pyav` or `torchvision` +_C.DATA.DECODING_BACKEND = "pyav" + +# if True, sample uniformly in [1 / max_scale, 1 / min_scale] and take a +# reciprocal to get the scale. If False, take a uniform sample from +# [min_scale, max_scale]. +_C.DATA.INV_UNIFORM_SAMPLE = False + +# If True, perform random horizontal flip on the video frames during training. +_C.DATA.RANDOM_FLIP = True + +# If True, calculdate the map as metric. +_C.DATA.MULTI_LABEL = False + +# Method to perform the ensemble, options include "sum" and "max". +_C.DATA.ENSEMBLE_METHOD = "sum" + +# If True, revert the default input channel (RBG <-> BGR). +_C.DATA.REVERSE_INPUT_CHANNEL = False + + +# ---------------------------------------------------------------------------- # +# Optimizer options +# ---------------------------------------------------------------------------- # +_C.SOLVER = CfgNode() + +# Base learning rate. +_C.SOLVER.BASE_LR = 0.1 + +# Learning rate policy (see utils/lr_policy.py for options and examples). +_C.SOLVER.LR_POLICY = "cosine" + +# Exponential decay factor. +_C.SOLVER.GAMMA = 0.1 + +# Step size for 'exp' and 'cos' policies (in epochs). +_C.SOLVER.STEP_SIZE = 1 + +# Steps for 'steps_' policies (in epochs). +_C.SOLVER.STEPS = [] + +# Learning rates for 'steps_' policies. +_C.SOLVER.LRS = [] + +# Maximal number of epochs. +_C.SOLVER.MAX_EPOCH = 300 + +# Momentum. +_C.SOLVER.MOMENTUM = 0.9 + +# Momentum dampening. +_C.SOLVER.DAMPENING = 0.0 + +# Nesterov momentum. +_C.SOLVER.NESTEROV = True + +# L2 regularization. +_C.SOLVER.WEIGHT_DECAY = 1e-4 + +# Start the warm up from SOLVER.BASE_LR * SOLVER.WARMUP_FACTOR. 
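+# With the defaults in this file (BASE_LR = 0.1, WARMUP_FACTOR = 0.1) the
+# warm-up therefore begins at a learning rate of 0.1 * 0.1 = 0.01.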
+_C.SOLVER.WARMUP_FACTOR = 0.1 + +# Gradually warm up the SOLVER.BASE_LR over this number of epochs. +_C.SOLVER.WARMUP_EPOCHS = 0.0 + +# The start learning rate of the warm up. +_C.SOLVER.WARMUP_START_LR = 0.01 + +# Optimization method. +_C.SOLVER.OPTIMIZING_METHOD = "sgd" + +_C.SOLVER.LR_STEP = 50000 + +_C.SOLVER.TOTAL_STEP = 200000 + +_C.SOLVER.FREEZE_STEP = 10000 + + +# ---------------------------------------------------------------------------- # +# Misc options +# ---------------------------------------------------------------------------- # + +# Number of GPUs to use (applies to both training and testing). +_C.NUM_GPUS = 1 + +# Number of machine to use for the job. +_C.NUM_SHARDS = 1 + +# The index of the current machine. +_C.SHARD_ID = 0 + +# Output basedir. +_C.OUTPUT_DIR = "./tmp" + +# train module +_C.TRAIN_MODULE= "train_unet_by_iter" + +# Note that non-determinism may still be present due to non-deterministic +# operator implementations in GPU operator libraries. +_C.RNG_SEED = 1 + +# Log period in iters. +_C.LOG_PERIOD = 10 + +# If True, log the model info. +_C.LOG_MODEL_INFO = True + +# Distributed backend. +_C.DIST_BACKEND = "nccl" + +# ---------------------------------------------------------------------------- # +# Benchmark options +# ---------------------------------------------------------------------------- # +_C.BENCHMARK = CfgNode() + +# Number of epochs for data loading benchmark. +_C.BENCHMARK.NUM_EPOCHS = 5 + +# Log period in iters for data loading benchmark. +_C.BENCHMARK.LOG_PERIOD = 100 + +# If True, shuffle dataloader for epoch during benchmark. +_C.BENCHMARK.SHUFFLE = True + + +# ---------------------------------------------------------------------------- # +# Common train/test data loader options +# ---------------------------------------------------------------------------- # +_C.DATA_LOADER = CfgNode() + +# Number of data loader workers per training process. +_C.DATA_LOADER.NUM_WORKERS = 8 + +# Load data to pinned host memory. +_C.DATA_LOADER.PIN_MEMORY = True + +# Enable multi thread decoding. +_C.DATA_LOADER.ENABLE_MULTI_THREAD_DECODE = False + + +# ---------------------------------------------------------------------------- # +# Detection options. +# ---------------------------------------------------------------------------- # +_C.DETECTION = CfgNode() + +# Whether enable video detection. +_C.DETECTION.ENABLE = False + +# Aligned version of RoI. More details can be found at slowfast/models/head_helper.py +_C.DETECTION.ALIGNED = True + +# Spatial scale factor. +_C.DETECTION.SPATIAL_SCALE_FACTOR = 16 + +# RoI tranformation resolution. +_C.DETECTION.ROI_XFORM_RESOLUTION = 7 + + +# ----------------------------------------------------------------------------- +# AVA Dataset options +# ----------------------------------------------------------------------------- +_C.AVA = CfgNode() + +# Directory path of frames. +_C.AVA.FRAME_DIR = "/mnt/fair-flash3-east/ava_trainval_frames.img/" + +# Directory path for files of frame lists. +_C.AVA.FRAME_LIST_DIR = ( + "/mnt/vol/gfsai-flash3-east/ai-group/users/haoqifan/ava/frame_list/" +) + +# Directory path for annotation files. +_C.AVA.ANNOTATION_DIR = ( + "/mnt/vol/gfsai-flash3-east/ai-group/users/haoqifan/ava/frame_list/" +) + +# Filenames of training samples list files. +_C.AVA.TRAIN_LISTS = ["train.csv"] + +# Filenames of test samples list files. +_C.AVA.TEST_LISTS = ["val.csv"] + +# Filenames of box list files for training. 
Note that we assume files which +# contains predicted boxes will have a suffix "predicted_boxes" in the +# filename. +_C.AVA.TRAIN_GT_BOX_LISTS = ["ava_train_v2.2.csv"] +_C.AVA.TRAIN_PREDICT_BOX_LISTS = [] + +# Filenames of box list files for test. +_C.AVA.TEST_PREDICT_BOX_LISTS = ["ava_val_predicted_boxes.csv"] + +# This option controls the score threshold for the predicted boxes to use. +_C.AVA.DETECTION_SCORE_THRESH = 0.9 + +# If use BGR as the format of input frames. +_C.AVA.BGR = False + +# Training augmentation parameters +# Whether to use color augmentation method. +_C.AVA.TRAIN_USE_COLOR_AUGMENTATION = False + +# Whether to only use PCA jitter augmentation when using color augmentation +# method (otherwise combine with color jitter method). +_C.AVA.TRAIN_PCA_JITTER_ONLY = True + +# Eigenvalues for PCA jittering. Note PCA is RGB based. +_C.AVA.TRAIN_PCA_EIGVAL = [0.225, 0.224, 0.229] + +# Eigenvectors for PCA jittering. +_C.AVA.TRAIN_PCA_EIGVEC = [ + [-0.5675, 0.7192, 0.4009], + [-0.5808, -0.0045, -0.8140], + [-0.5836, -0.6948, 0.4203], +] + +# Whether to do horizontal flipping during test. +_C.AVA.TEST_FORCE_FLIP = False + +# Whether to use full test set for validation split. +_C.AVA.FULL_TEST_ON_VAL = False + +# The name of the file to the ava label map. +_C.AVA.LABEL_MAP_FILE = "ava_action_list_v2.2_for_activitynet_2019.pbtxt" + +# The name of the file to the ava exclusion. +_C.AVA.EXCLUSION_FILE = "ava_val_excluded_timestamps_v2.2.csv" + +# The name of the file to the ava groundtruth. +_C.AVA.GROUNDTRUTH_FILE = "ava_val_v2.2.csv" + +# Backend to process image, includes `pytorch` and `cv2`. +_C.AVA.IMG_PROC_BACKEND = "cv2" + +# ---------------------------------------------------------------------------- # +# Multigrid training options +# See https://arxiv.org/abs/1912.00998 for details about multigrid training. +# ---------------------------------------------------------------------------- # +_C.MULTIGRID = CfgNode() + +# Multigrid training allows us to train for more epochs with fewer iterations. +# This hyperparameter specifies how many times more epochs to train. +# The default setting in paper trains for 1.5x more epochs than baseline. +_C.MULTIGRID.EPOCH_FACTOR = 1.5 + +# Enable short cycles. +_C.MULTIGRID.SHORT_CYCLE = False +# Short cycle additional spatial dimensions relative to the default crop size. +_C.MULTIGRID.SHORT_CYCLE_FACTORS = [0.5, 0.5 ** 0.5] + +_C.MULTIGRID.LONG_CYCLE = False +# (Temporal, Spatial) dimensions relative to the default shape. +_C.MULTIGRID.LONG_CYCLE_FACTORS = [ + (0.25, 0.5 ** 0.5), + (0.5, 0.5 ** 0.5), + (0.5, 1), + (1, 1), +] + +# While a standard BN computes stats across all examples in a GPU, +# for multigrid training we fix the number of clips to compute BN stats on. +# See https://arxiv.org/abs/1912.00998 for details. +_C.MULTIGRID.BN_BASE_SIZE = 8 + +# Multigrid training epochs are not proportional to actual training time or +# computations, so _C.TRAIN.EVAL_PERIOD leads to too frequent or rare +# evaluation. We use a multigrid-specific rule to determine when to evaluate: +# This hyperparameter defines how many times to evaluate a model per long +# cycle shape. +_C.MULTIGRID.EVAL_FREQ = 3 + +# No need to specify; Set automatically and used as global variables. 
+_C.MULTIGRID.LONG_CYCLE_SAMPLING_RATE = 0 +_C.MULTIGRID.DEFAULT_B = 0 +_C.MULTIGRID.DEFAULT_T = 0 +_C.MULTIGRID.DEFAULT_S = 0 + +# ----------------------------------------------------------------------------- +# Tensorboard Visualization Options +# ----------------------------------------------------------------------------- +_C.TENSORBOARD = CfgNode() + +# Log to summary writer, this will automatically. +# log loss, lr and metrics during train/eval. +_C.TENSORBOARD.ENABLE = False +# Provide path to prediction results for visualization. +# This is a pickle file of [prediction_tensor, label_tensor] +_C.TENSORBOARD.PREDICTIONS_PATH = "" +# Path to directory for tensorboard logs. +# Default to to cfg.OUTPUT_DIR/runs-{cfg.TRAIN.DATASET}. +_C.TENSORBOARD.LOG_DIR = "" +# Path to a json file providing class_name - id mapping +# in the format {"class_name1": id1, "class_name2": id2, ...}. +# This file must be provided to enable plotting confusion matrix +# by a subset or parent categories. +_C.TENSORBOARD.CLASS_NAMES_PATH = "" + +# Path to a json file for categories -> classes mapping +# in the format {"parent_class": ["child_class1", "child_class2",...], ...}. +_C.TENSORBOARD.CATEGORIES_PATH = "" + +# Config for confusion matrices visualization. +_C.TENSORBOARD.CONFUSION_MATRIX = CfgNode() +# Visualize confusion matrix. +_C.TENSORBOARD.CONFUSION_MATRIX.ENABLE = False +# Figure size of the confusion matrices plotted. +_C.TENSORBOARD.CONFUSION_MATRIX.FIGSIZE = [8, 8] +# Path to a subset of categories to visualize. +# File contains class names separated by newline characters. +_C.TENSORBOARD.CONFUSION_MATRIX.SUBSET_PATH = "" + +# Config for histogram visualization. +_C.TENSORBOARD.HISTOGRAM = CfgNode() +# Visualize histograms. +_C.TENSORBOARD.HISTOGRAM.ENABLE = False +# Path to a subset of classes to plot histograms. +# Class names must be separated by newline characters. +_C.TENSORBOARD.HISTOGRAM.SUBSET_PATH = "" +# Visualize top-k most predicted classes on histograms for each +# chosen true label. +_C.TENSORBOARD.HISTOGRAM.TOPK = 10 +# Figure size of the histograms plotted. +_C.TENSORBOARD.HISTOGRAM.FIGSIZE = [8, 8] + +# Config for layers' weights and activations visualization. +# _C.TENSORBOARD.ENABLE must be True. +_C.TENSORBOARD.MODEL_VIS = CfgNode() + +# If False, skip model visualization. +_C.TENSORBOARD.MODEL_VIS.ENABLE = False + +# If False, skip visualizing model weights. +_C.TENSORBOARD.MODEL_VIS.MODEL_WEIGHTS = False + +# If False, skip visualizing model activations. +_C.TENSORBOARD.MODEL_VIS.ACTIVATIONS = False + +# If False, skip visualizing input videos. +_C.TENSORBOARD.MODEL_VIS.INPUT_VIDEO = False + + +# List of strings containing data about layer names and their indexing to +# visualize weights and activations for. The indexing is meant for +# choosing a subset of activations outputed by a layer for visualization. +# If indexing is not specified, visualize all activations outputed by the layer. +# For each string, layer name and indexing is separated by whitespaces. +# e.g.: [layer1 1,2;1,2, layer2, layer3 150,151;3,4]; this means for each array `arr` +# along the batch dimension in `layer1`, we take arr[[1, 2], [1, 2]] +_C.TENSORBOARD.MODEL_VIS.LAYER_LIST = [] +# Top-k predictions to plot on videos +_C.TENSORBOARD.MODEL_VIS.TOPK_PREDS = 1 +# Colormap to for text boxes and bounding boxes colors +_C.TENSORBOARD.MODEL_VIS.COLORMAP = "Pastel2" +# Config for visualization video inputs with Grad-CAM. +# _C.TENSORBOARD.ENABLE must be True. 
+_C.TENSORBOARD.MODEL_VIS.GRAD_CAM = CfgNode() +# Whether to run visualization using Grad-CAM technique. +_C.TENSORBOARD.MODEL_VIS.GRAD_CAM.ENABLE = True +# CNN layers to use for Grad-CAM. The number of layers must be equal to +# number of pathway(s). +_C.TENSORBOARD.MODEL_VIS.GRAD_CAM.LAYER_LIST = [] +# If True, visualize Grad-CAM using true labels for each instances. +# If False, use the highest predicted class. +_C.TENSORBOARD.MODEL_VIS.GRAD_CAM.USE_TRUE_LABEL = False +# Colormap to for text boxes and bounding boxes colors +_C.TENSORBOARD.MODEL_VIS.GRAD_CAM.COLORMAP = "viridis" + +# Config for visualization for wrong prediction visualization. +# _C.TENSORBOARD.ENABLE must be True. +_C.TENSORBOARD.WRONG_PRED_VIS = CfgNode() +_C.TENSORBOARD.WRONG_PRED_VIS.ENABLE = False +# Folder tag to origanize model eval videos under. +_C.TENSORBOARD.WRONG_PRED_VIS.TAG = "Incorrectly classified videos." +# Subset of labels to visualize. Only wrong predictions with true labels +# within this subset is visualized. +_C.TENSORBOARD.WRONG_PRED_VIS.SUBSET_PATH = "" + + + +############### +_C.JITTER = CfgNode() + +_C.JITTER.ENABLE = False + +_C.JITTER.CONTINUS_METHODS=["blend_diff_person","blend_downsampled","blend_same_person"] +_C.JITTER.DISCONTINUS_METHODS=["light", "rotate", "skip"] + +_C.JITTER.STRONG_INNER_CLIP_MASK_JITTER= False + +# ---------------------------------------------------------------------------- # +# Demo options +# ---------------------------------------------------------------------------- # +_C.DEMO = CfgNode() + +# Run model in DEMO mode. +_C.DEMO.ENABLE = False + +# Path to a json file providing class_name - id mapping +# in the format {"class_name1": id1, "class_name2": id2, ...}. +_C.DEMO.LABEL_FILE_PATH = "" + +# Specify a camera device as input. This will be prioritized +# over input video if set. +# If -1, use input video instead. +_C.DEMO.WEBCAM = -1 + +# Path to input video for demo. +_C.DEMO.INPUT_VIDEO = "" +# Custom width for reading input video data. +_C.DEMO.DISPLAY_WIDTH = 0 +# Custom height for reading input video data. +_C.DEMO.DISPLAY_HEIGHT = 0 +# Path to Detectron2 object detection model configuration, +# only used for detection tasks. +_C.DEMO.DETECTRON2_CFG = "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml" +# Path to Detectron2 object detection model pre-trained weights. +_C.DEMO.DETECTRON2_WEIGHTS = "detectron2://COCO-Detection/faster_rcnn_R_50_FPN_3x/137849458/model_final_280758.pkl" +# Threshold for choosing predicted bounding boxes by Detectron2. +_C.DEMO.DETECTRON2_THRESH = 0.9 +# Number of overlapping frames between 2 consecutive clips. +# Increase this number for more frequent action predictions. +# The number of overlapping frames cannot be larger than +# half of the sequence length `cfg.DATA.NUM_FRAMES * cfg.DATA.SAMPLING_RATE` +_C.DEMO.BUFFER_SIZE = 0 +# If specified, the visualized outputs will be written this a video file of +# this path. Otherwise, the visualized outputs will be displayed in a window. +_C.DEMO.OUTPUT_FILE = "" +# Frames per second rate for writing to output video file. +# If not set (-1), use fps rate from input file. +_C.DEMO.OUTPUT_FPS = -1 +# Input format from demo video reader ("RGB" or "BGR"). +_C.DEMO.INPUT_FORMAT = "BGR" +# Draw visualization frames in [keyframe_idx - CLIP_VIS_SIZE, keyframe_idx + CLIP_VIS_SIZE] inclusively. +_C.DEMO.CLIP_VIS_SIZE = 10 +# Number of processes to run video visualizer. 
+_C.DEMO.NUM_VIS_INSTANCES = 2 + +# Path to pre-computed predicted boxes +_C.DEMO.PREDS_BOXES = "" +# Whether to run in with multi-threaded video reader. +_C.DEMO.THREAD_ENABLE = False +# Take one clip for every `DEMO.NUM_CLIPS_SKIP` + 1 for prediction and visualization. +# This is used for fast demo speed by reducing the prediction/visualiztion frequency. +# If -1, take the most recent read clip for visualization. This mode is only supported +# if `DEMO.THREAD_ENABLE` is set to True. +_C.DEMO.NUM_CLIPS_SKIP = 0 +# Path to ground-truth boxes and labels (optional) +_C.DEMO.GT_BOXES = "" +# The starting second of the video w.r.t bounding boxes file. +_C.DEMO.STARTING_SECOND = 900 +# Frames per second of the input video/folder of images. +_C.DEMO.FPS = 30 +# Visualize with top-k predictions or predictions above certain threshold(s). +# Option: {"thres", "top-k"} +_C.DEMO.VIS_MODE = "thres" +# Threshold for common class names. +_C.DEMO.COMMON_CLASS_THRES = 0.7 +# Theshold for uncommon class names. This will not be +# used if `_C.DEMO.COMMON_CLASS_NAMES` is empty. +_C.DEMO.UNCOMMON_CLASS_THRES = 0.3 +# This is chosen based on distribution of examples in +# each classes in AVA dataset. +_C.DEMO.COMMON_CLASS_NAMES = [ + "watch (a person)", + "talk to (e.g., self, a person, a group)", + "listen to (a person)", + "touch (an object)", + "carry/hold (an object)", + "walk", + "sit", + "lie/sleep", + "bend/bow (at the waist)", +] +# Slow-motion rate for the visualization. The visualized portions of the +# video will be played `_C.DEMO.SLOWMO` times slower than usual speed. +_C.DEMO.SLOWMO = 1 + +# Add custom config with default values. +custom_config.add_custom_config(_C) + + +def _assert_and_infer_cfg(cfg): + # BN assertions. + if cfg.BN.USE_PRECISE_STATS: + assert cfg.BN.NUM_BATCHES_PRECISE >= 0 + # TRAIN assertions. + assert cfg.TRAIN.CHECKPOINT_TYPE in ["pytorch", "caffe2"] + assert cfg.TRAIN.BATCH_SIZE % cfg.NUM_GPUS == 0 + + # TEST assertions. + assert cfg.TEST.CHECKPOINT_TYPE in ["pytorch", "caffe2"] + assert cfg.TEST.BATCH_SIZE % cfg.NUM_GPUS == 0 + assert cfg.TEST.NUM_SPATIAL_CROPS == 3 + + # RESNET assertions. + assert cfg.RESNET.NUM_GROUPS > 0 + assert cfg.RESNET.WIDTH_PER_GROUP > 0 + assert cfg.RESNET.WIDTH_PER_GROUP % cfg.RESNET.NUM_GROUPS == 0 + + # General assertions. + assert cfg.SHARD_ID < cfg.NUM_SHARDS + return cfg + + +def get_cfg(): + """ + Get a copy of the default config. + """ + return _assert_and_infer_cfg(_C.clone()) diff --git a/training/detectors/utils/slowfast/config/defaults.py b/training/detectors/utils/slowfast/config/defaults.py new file mode 100644 index 0000000000000000000000000000000000000000..083a3c1f31c822143f7a0a68b374ae8b83430b3c --- /dev/null +++ b/training/detectors/utils/slowfast/config/defaults.py @@ -0,0 +1,816 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Configs.""" +import yaml +from fvcore.common.config import CfgNode as CfgNodeOri + +from . import custom_config +def load_yaml_with_base(text: str, allow_unsafe: bool = False): + """ + Just like `yaml.load(open(filename))`, but inherit attributes from its + `_BASE_`. + Args: + text (str): the file name of the current config. Will be used to + find the base config file. + allow_unsafe (bool): whether to allow loading the config file with + `yaml.unsafe_load`. 
+ Returns: + (dict): the loaded yaml + """ + cfg = yaml.load(text, Loader=yaml.FullLoader) + return cfg +class CfgNode(CfgNodeOri): + def merge_from_str(self, text, allow_unsafe=False): + loaded_cfg = load_yaml_with_base(text, allow_unsafe=allow_unsafe) + loaded_cfg = type(self)(loaded_cfg) + self.merge_from_other_cfg(loaded_cfg) + +# ----------------------------------------------------------------------------- +# Config definition +# ----------------------------------------------------------------------------- +_C = CfgNode() + + +# ---------------------------------------------------------------------------- # +# Batch norm options +# ---------------------------------------------------------------------------- # +_C.BN = CfgNode() + +# Precise BN stats. +_C.BN.USE_PRECISE_STATS = False + +# Number of samples use to compute precise bn. +_C.BN.NUM_BATCHES_PRECISE = 200 + +# Weight decay value that applies on BN. +_C.BN.WEIGHT_DECAY = 0.0 + +# Norm type, options include `batchnorm`, `sub_batchnorm`, `sync_batchnorm` +_C.BN.NORM_TYPE = "batchnorm" + +# Parameter for SubBatchNorm, where it splits the batch dimension into +# NUM_SPLITS splits, and run BN on each of them separately independently. +_C.BN.NUM_SPLITS = 1 + +# Parameter for NaiveSyncBatchNorm3d, where the stats across `NUM_SYNC_DEVICES` +# devices will be synchronized. +_C.BN.NUM_SYNC_DEVICES = 1 + + +# ---------------------------------------------------------------------------- # +# Training options. +# ---------------------------------------------------------------------------- # +_C.TRAIN = CfgNode() + +# If True Train the model, else skip training. +_C.TRAIN.ENABLE = True + +# Dataset. +_C.TRAIN.DATASET = "kinetics" + +# Total mini-batch size. +_C.TRAIN.BATCH_SIZE = 64 + +_C.TRAIN.SPLIT = "train_subset2.pth" +# Evaluate model on test data every eval period epochs. +_C.TRAIN.EVAL_PERIOD = 1 + +# Save model checkpoint every checkpoint period epochs. +_C.TRAIN.CHECKPOINT_PERIOD = 1 + +# Save model checkpoint every checkpoint period iters. +_C.TRAIN.CHECKPOINT_PERIOD_BY_ITER = 500 + + +# Resume training from the latest checkpoint in the output directory. +_C.TRAIN.AUTO_RESUME = True + +# Path to the checkpoint to load the initial weight. +_C.TRAIN.CHECKPOINT_FILE_PATH = "" + +# Checkpoint types include `caffe2` or `pytorch`. +_C.TRAIN.CHECKPOINT_TYPE = "pytorch" + +# If True, perform inflation when loading checkpoint. +_C.TRAIN.CHECKPOINT_INFLATE = False + + +# ---------------------------------------------------------------------------- # +# Testing options +# ---------------------------------------------------------------------------- # +_C.TEST = CfgNode() + +# If True test the model, else skip the testing. +_C.TEST.ENABLE = True + +# Dataset for testing. +_C.TEST.DATASET = "kinetics" + +_C.TEST.SPLIT = "test_subset2.pth" +# Total mini-batch size +_C.TEST.BATCH_SIZE = 8 + +# Path to the checkpoint to load the initial weight. +_C.TEST.CHECKPOINT_FILE_PATH = "" + +# Number of clips to sample from a video uniformly for aggregating the +# prediction results. +_C.TEST.NUM_ENSEMBLE_VIEWS = 10 + +# Number of crops to sample from a frame spatially for aggregating the +# prediction results. +_C.TEST.NUM_SPATIAL_CROPS = 3 + +# Checkpoint types include `caffe2` or `pytorch`. +_C.TEST.CHECKPOINT_TYPE = "pytorch" +# Path to saving prediction results file. 
+_C.TEST.SAVE_RESULTS_PATH = "" +# ----------------------------------------------------------------------------- +# ResNet options +# ----------------------------------------------------------------------------- +_C.RESNET = CfgNode() + +# Transformation function. +_C.RESNET.TRANS_FUNC = "bottleneck_transform" + +# Number of groups. 1 for ResNet, and larger than 1 for ResNeXt). +_C.RESNET.NUM_GROUPS = 1 + +# Width of each group (64 -> ResNet; 4 -> ResNeXt). +_C.RESNET.WIDTH_PER_GROUP = 64 + +# Apply relu in a inplace manner. +_C.RESNET.INPLACE_RELU = True + +# Apply stride to 1x1 conv. +_C.RESNET.STRIDE_1X1 = False + +# If true, initialize the gamma of the final BN of each block to zero. +_C.RESNET.ZERO_INIT_FINAL_BN = False + +# Number of weight layers. +_C.RESNET.DEPTH = 50 + + +# label of branchs +_C.RESNET.LABELS = ["continus","discontinus"] + +# If the current block has more than NUM_BLOCK_TEMP_KERNEL blocks, use temporal +# kernel of 1 for the rest of the blocks. +_C.RESNET.NUM_BLOCK_TEMP_KERNEL = [[3], [4], [6], [3]] + +# Size of stride on different res stages. +_C.RESNET.SPATIAL_STRIDES = [[1], [2], [2], [2]] + +# Size of dilation on different res stages. +_C.RESNET.SPATIAL_DILATIONS = [[1], [1], [1], [1]] + + +# ----------------------------------------------------------------------------- +# Nonlocal options +# ----------------------------------------------------------------------------- +_C.NONLOCAL = CfgNode() + +# Index of each stage and block to add nonlocal layers. +_C.NONLOCAL.LOCATION = [[[]], [[]], [[]], [[]]] + +# Number of group for nonlocal for each stage. +_C.NONLOCAL.GROUP = [[1], [1], [1], [1]] + +# Instatiation to use for non-local layer. +_C.NONLOCAL.INSTANTIATION = "dot_product" + + +# Size of pooling layers used in Non-Local. +_C.NONLOCAL.POOL = [ + # Res2 + [[1, 2, 2], [1, 2, 2]], + # Res3 + [[1, 2, 2], [1, 2, 2]], + # Res4 + [[1, 2, 2], [1, 2, 2]], + # Res5 + [[1, 2, 2], [1, 2, 2]], +] + +# ----------------------------------------------------------------------------- +# Model options +# ----------------------------------------------------------------------------- +_C.MODEL = CfgNode() + +# Model architecture. +_C.MODEL.ARCH = "slowfast" + +# Model name +_C.MODEL.MODEL_NAME = "SlowFast" + +# The number of classes to predict for the model. +_C.MODEL.NUM_CLASSES = 400 + +# Loss function. +_C.MODEL.LOSS_FUNC = "cross_entropy" + +_C.MODEL.MASK_WEIGHT = 100 + +_C.MODEL.CLASS_WEIGHT = 1 + +# Model architectures that has one single pathway. +_C.MODEL.SINGLE_PATHWAY_ARCH = ["c2d", "i3d", "slow"] + +# Model architectures that has multiple pathways. +_C.MODEL.MULTI_PATHWAY_ARCH = ["slowfast"] + +# Dropout rate before final projection in the backbone. +_C.MODEL.DROPOUT_RATE = 0.5 + +# The std to initialize the fc layer(s). +_C.MODEL.FC_INIT_STD = 0.01 + +# Activation layer for the output head. +_C.MODEL.HEAD_ACT = "softmax" + + +# ----------------------------------------------------------------------------- +# SlowFast options +# ----------------------------------------------------------------------------- +_C.SLOWFAST = CfgNode() + +# Corresponds to the inverse of the channel reduction ratio, $\beta$ between +# the Slow and Fast pathways. +_C.SLOWFAST.BETA_INV = 8 + +# Corresponds to the frame rate reduction ratio, $\alpha$ between the Slow and +# Fast pathways. +_C.SLOWFAST.ALPHA = 8 + +# Ratio of channel dimensions between the Slow and Fast pathways. 
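+# Used by the lateral Fast-to-Slow fusion connection, which widens the Fast
+# features by this factor before they are concatenated onto the Slow pathway.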
+_C.SLOWFAST.FUSION_CONV_CHANNEL_RATIO = 2 + +# Kernel dimension used for fusing information from Fast pathway to Slow +# pathway. +_C.SLOWFAST.FUSION_KERNEL_SZ = 5 + + +# ----------------------------------------------------------------------------- +# Data options +# ----------------------------------------------------------------------------- +_C.DATA = CfgNode() + +# The path to the data directory. +_C.DATA.PATH_TO_DATA_DIR = "" + +_C.DATA.DATASET = "faceforensics" + +_C.DATA.MODE = "" + +_C.DATA.ADAPTIVE = False + +_C.DATA.SCALE = 1.0 +# The separator used between path and label. +_C.DATA.PATH_LABEL_SEPARATOR = " " + +# Video path prefix if any. +_C.DATA.PATH_PREFIX = "" + +# The spatial crop size of the input clip. +_C.DATA.CROP_SIZE = 224 + +# The number of frames of the input clip. +_C.DATA.NUM_FRAMES = 8 + +_C.DATA.NUM_FRAMES_RANGE = [1,2,3,4,5,6,7,8] + +# The video sampling rate of the input clip. +_C.DATA.SAMPLING_RATE = 8 + +# The mean value of the video raw pixels across the R G B channels. +_C.DATA.MEAN = [0.45, 0.45, 0.45] +# List of input frame channel dimensions. + +_C.DATA.INPUT_CHANNEL_NUM = [3, 3] + +# The std value of the video raw pixels across the R G B channels. +_C.DATA.STD = [0.225, 0.225, 0.225] + +# The spatial augmentation jitter scales for training. +_C.DATA.TRAIN_JITTER_SCALES = [256, 320] + +# The spatial crop size for training. +_C.DATA.TRAIN_CROP_SIZE = 224 + +# The spatial crop size for testing. +_C.DATA.TEST_CROP_SIZE = 256 + +# Input videos may has different fps, convert it to the target video fps before +# frame sampling. +_C.DATA.TARGET_FPS = 30 + +# Decoding backend, options include `pyav` or `torchvision` +_C.DATA.DECODING_BACKEND = "pyav" + +# if True, sample uniformly in [1 / max_scale, 1 / min_scale] and take a +# reciprocal to get the scale. If False, take a uniform sample from +# [min_scale, max_scale]. +_C.DATA.INV_UNIFORM_SAMPLE = False + +# If True, perform random horizontal flip on the video frames during training. +_C.DATA.RANDOM_FLIP = True + +# If True, calculdate the map as metric. +_C.DATA.MULTI_LABEL = False + +# Method to perform the ensemble, options include "sum" and "max". +_C.DATA.ENSEMBLE_METHOD = "sum" + +# If True, revert the default input channel (RBG <-> BGR). +_C.DATA.REVERSE_INPUT_CHANNEL = False + + +# ---------------------------------------------------------------------------- # +# Optimizer options +# ---------------------------------------------------------------------------- # +_C.SOLVER = CfgNode() + +# Base learning rate. +_C.SOLVER.BASE_LR = 0.1 + +# Learning rate policy (see utils/lr_policy.py for options and examples). +_C.SOLVER.LR_POLICY = "cosine" + +# Exponential decay factor. +_C.SOLVER.GAMMA = 0.1 + +# Step size for 'exp' and 'cos' policies (in epochs). +_C.SOLVER.STEP_SIZE = 1 + +# Steps for 'steps_' policies (in epochs). +_C.SOLVER.STEPS = [] + +# Learning rates for 'steps_' policies. +_C.SOLVER.LRS = [] + +# Maximal number of epochs. +_C.SOLVER.MAX_EPOCH = 300 + +# Momentum. +_C.SOLVER.MOMENTUM = 0.9 + +# Momentum dampening. +_C.SOLVER.DAMPENING = 0.0 + +# Nesterov momentum. +_C.SOLVER.NESTEROV = True + +# L2 regularization. +_C.SOLVER.WEIGHT_DECAY = 1e-4 + +# Start the warm up from SOLVER.BASE_LR * SOLVER.WARMUP_FACTOR. +_C.SOLVER.WARMUP_FACTOR = 0.1 + +# Gradually warm up the SOLVER.BASE_LR over this number of epochs. +_C.SOLVER.WARMUP_EPOCHS = 0.0 + +# The start learning rate of the warm up. +_C.SOLVER.WARMUP_START_LR = 0.01 + +# Optimization method. 
+_C.SOLVER.OPTIMIZING_METHOD = "sgd" + +_C.SOLVER.LR_STEP = 50000 + +_C.SOLVER.TOTAL_STEP = 200000 + +_C.SOLVER.FREEZE_STEP = 10000 + + +# ---------------------------------------------------------------------------- # +# Misc options +# ---------------------------------------------------------------------------- # + +# Number of GPUs to use (applies to both training and testing). +_C.NUM_GPUS = 1 + +# Number of machine to use for the job. +_C.NUM_SHARDS = 1 + +# The index of the current machine. +_C.SHARD_ID = 0 + +# Output basedir. +_C.OUTPUT_DIR = "./tmp" + +# train module +_C.TRAIN_MODULE= "train_unet_by_iter" + +# Note that non-determinism may still be present due to non-deterministic +# operator implementations in GPU operator libraries. +_C.RNG_SEED = 1 + +# Log period in iters. +_C.LOG_PERIOD = 10 + +# If True, log the model info. +_C.LOG_MODEL_INFO = True + +# Distributed backend. +_C.DIST_BACKEND = "nccl" + +# ---------------------------------------------------------------------------- # +# Benchmark options +# ---------------------------------------------------------------------------- # +_C.BENCHMARK = CfgNode() + +# Number of epochs for data loading benchmark. +_C.BENCHMARK.NUM_EPOCHS = 5 + +# Log period in iters for data loading benchmark. +_C.BENCHMARK.LOG_PERIOD = 100 + +# If True, shuffle dataloader for epoch during benchmark. +_C.BENCHMARK.SHUFFLE = True + + +# ---------------------------------------------------------------------------- # +# Common train/test data loader options +# ---------------------------------------------------------------------------- # +_C.DATA_LOADER = CfgNode() + +# Number of data loader workers per training process. +_C.DATA_LOADER.NUM_WORKERS = 8 + +# Load data to pinned host memory. +_C.DATA_LOADER.PIN_MEMORY = True + +# Enable multi thread decoding. +_C.DATA_LOADER.ENABLE_MULTI_THREAD_DECODE = False + + +# ---------------------------------------------------------------------------- # +# Detection options. +# ---------------------------------------------------------------------------- # +_C.DETECTION = CfgNode() + +# Whether enable video detection. +_C.DETECTION.ENABLE = False + +# Aligned version of RoI. More details can be found at slowfast/models/head_helper.py +_C.DETECTION.ALIGNED = True + +# Spatial scale factor. +_C.DETECTION.SPATIAL_SCALE_FACTOR = 16 + +# RoI tranformation resolution. +_C.DETECTION.ROI_XFORM_RESOLUTION = 7 + + +# ----------------------------------------------------------------------------- +# AVA Dataset options +# ----------------------------------------------------------------------------- +_C.AVA = CfgNode() + +# Directory path of frames. +_C.AVA.FRAME_DIR = "/mnt/fair-flash3-east/ava_trainval_frames.img/" + +# Directory path for files of frame lists. +_C.AVA.FRAME_LIST_DIR = ( + "/mnt/vol/gfsai-flash3-east/ai-group/users/haoqifan/ava/frame_list/" +) + +# Directory path for annotation files. +_C.AVA.ANNOTATION_DIR = ( + "/mnt/vol/gfsai-flash3-east/ai-group/users/haoqifan/ava/frame_list/" +) + +# Filenames of training samples list files. +_C.AVA.TRAIN_LISTS = ["train.csv"] + +# Filenames of test samples list files. +_C.AVA.TEST_LISTS = ["val.csv"] + +# Filenames of box list files for training. Note that we assume files which +# contains predicted boxes will have a suffix "predicted_boxes" in the +# filename. +_C.AVA.TRAIN_GT_BOX_LISTS = ["ava_train_v2.2.csv"] +_C.AVA.TRAIN_PREDICT_BOX_LISTS = [] + +# Filenames of box list files for test. 
+_C.AVA.TEST_PREDICT_BOX_LISTS = ["ava_val_predicted_boxes.csv"] + +# This option controls the score threshold for the predicted boxes to use. +_C.AVA.DETECTION_SCORE_THRESH = 0.9 + +# If use BGR as the format of input frames. +_C.AVA.BGR = False + +# Training augmentation parameters +# Whether to use color augmentation method. +_C.AVA.TRAIN_USE_COLOR_AUGMENTATION = False + +# Whether to only use PCA jitter augmentation when using color augmentation +# method (otherwise combine with color jitter method). +_C.AVA.TRAIN_PCA_JITTER_ONLY = True + +# Eigenvalues for PCA jittering. Note PCA is RGB based. +_C.AVA.TRAIN_PCA_EIGVAL = [0.225, 0.224, 0.229] + +# Eigenvectors for PCA jittering. +_C.AVA.TRAIN_PCA_EIGVEC = [ + [-0.5675, 0.7192, 0.4009], + [-0.5808, -0.0045, -0.8140], + [-0.5836, -0.6948, 0.4203], +] + +# Whether to do horizontal flipping during test. +_C.AVA.TEST_FORCE_FLIP = False + +# Whether to use full test set for validation split. +_C.AVA.FULL_TEST_ON_VAL = False + +# The name of the file to the ava label map. +_C.AVA.LABEL_MAP_FILE = "ava_action_list_v2.2_for_activitynet_2019.pbtxt" + +# The name of the file to the ava exclusion. +_C.AVA.EXCLUSION_FILE = "ava_val_excluded_timestamps_v2.2.csv" + +# The name of the file to the ava groundtruth. +_C.AVA.GROUNDTRUTH_FILE = "ava_val_v2.2.csv" + +# Backend to process image, includes `pytorch` and `cv2`. +_C.AVA.IMG_PROC_BACKEND = "cv2" + +# ---------------------------------------------------------------------------- # +# Multigrid training options +# See https://arxiv.org/abs/1912.00998 for details about multigrid training. +# ---------------------------------------------------------------------------- # +_C.MULTIGRID = CfgNode() + +# Multigrid training allows us to train for more epochs with fewer iterations. +# This hyperparameter specifies how many times more epochs to train. +# The default setting in paper trains for 1.5x more epochs than baseline. +_C.MULTIGRID.EPOCH_FACTOR = 1.5 + +# Enable short cycles. +_C.MULTIGRID.SHORT_CYCLE = False +# Short cycle additional spatial dimensions relative to the default crop size. +_C.MULTIGRID.SHORT_CYCLE_FACTORS = [0.5, 0.5 ** 0.5] + +_C.MULTIGRID.LONG_CYCLE = False +# (Temporal, Spatial) dimensions relative to the default shape. +_C.MULTIGRID.LONG_CYCLE_FACTORS = [ + (0.25, 0.5 ** 0.5), + (0.5, 0.5 ** 0.5), + (0.5, 1), + (1, 1), +] + +# While a standard BN computes stats across all examples in a GPU, +# for multigrid training we fix the number of clips to compute BN stats on. +# See https://arxiv.org/abs/1912.00998 for details. +_C.MULTIGRID.BN_BASE_SIZE = 8 + +# Multigrid training epochs are not proportional to actual training time or +# computations, so _C.TRAIN.EVAL_PERIOD leads to too frequent or rare +# evaluation. We use a multigrid-specific rule to determine when to evaluate: +# This hyperparameter defines how many times to evaluate a model per long +# cycle shape. +_C.MULTIGRID.EVAL_FREQ = 3 + +# No need to specify; Set automatically and used as global variables. +_C.MULTIGRID.LONG_CYCLE_SAMPLING_RATE = 0 +_C.MULTIGRID.DEFAULT_B = 0 +_C.MULTIGRID.DEFAULT_T = 0 +_C.MULTIGRID.DEFAULT_S = 0 + +# ----------------------------------------------------------------------------- +# Tensorboard Visualization Options +# ----------------------------------------------------------------------------- +_C.TENSORBOARD = CfgNode() + +# Log to summary writer, this will automatically. +# log loss, lr and metrics during train/eval. 
+_C.TENSORBOARD.ENABLE = False +# Provide path to prediction results for visualization. +# This is a pickle file of [prediction_tensor, label_tensor] +_C.TENSORBOARD.PREDICTIONS_PATH = "" +# Path to directory for tensorboard logs. +# Default to to cfg.OUTPUT_DIR/runs-{cfg.TRAIN.DATASET}. +_C.TENSORBOARD.LOG_DIR = "" +# Path to a json file providing class_name - id mapping +# in the format {"class_name1": id1, "class_name2": id2, ...}. +# This file must be provided to enable plotting confusion matrix +# by a subset or parent categories. +_C.TENSORBOARD.CLASS_NAMES_PATH = "" + +# Path to a json file for categories -> classes mapping +# in the format {"parent_class": ["child_class1", "child_class2",...], ...}. +_C.TENSORBOARD.CATEGORIES_PATH = "" + +# Config for confusion matrices visualization. +_C.TENSORBOARD.CONFUSION_MATRIX = CfgNode() +# Visualize confusion matrix. +_C.TENSORBOARD.CONFUSION_MATRIX.ENABLE = False +# Figure size of the confusion matrices plotted. +_C.TENSORBOARD.CONFUSION_MATRIX.FIGSIZE = [8, 8] +# Path to a subset of categories to visualize. +# File contains class names separated by newline characters. +_C.TENSORBOARD.CONFUSION_MATRIX.SUBSET_PATH = "" + +# Config for histogram visualization. +_C.TENSORBOARD.HISTOGRAM = CfgNode() +# Visualize histograms. +_C.TENSORBOARD.HISTOGRAM.ENABLE = False +# Path to a subset of classes to plot histograms. +# Class names must be separated by newline characters. +_C.TENSORBOARD.HISTOGRAM.SUBSET_PATH = "" +# Visualize top-k most predicted classes on histograms for each +# chosen true label. +_C.TENSORBOARD.HISTOGRAM.TOPK = 10 +# Figure size of the histograms plotted. +_C.TENSORBOARD.HISTOGRAM.FIGSIZE = [8, 8] + +# Config for layers' weights and activations visualization. +# _C.TENSORBOARD.ENABLE must be True. +_C.TENSORBOARD.MODEL_VIS = CfgNode() + +# If False, skip model visualization. +_C.TENSORBOARD.MODEL_VIS.ENABLE = False + +# If False, skip visualizing model weights. +_C.TENSORBOARD.MODEL_VIS.MODEL_WEIGHTS = False + +# If False, skip visualizing model activations. +_C.TENSORBOARD.MODEL_VIS.ACTIVATIONS = False + +# If False, skip visualizing input videos. +_C.TENSORBOARD.MODEL_VIS.INPUT_VIDEO = False + + +# List of strings containing data about layer names and their indexing to +# visualize weights and activations for. The indexing is meant for +# choosing a subset of activations outputed by a layer for visualization. +# If indexing is not specified, visualize all activations outputed by the layer. +# For each string, layer name and indexing is separated by whitespaces. +# e.g.: [layer1 1,2;1,2, layer2, layer3 150,151;3,4]; this means for each array `arr` +# along the batch dimension in `layer1`, we take arr[[1, 2], [1, 2]] +_C.TENSORBOARD.MODEL_VIS.LAYER_LIST = [] +# Top-k predictions to plot on videos +_C.TENSORBOARD.MODEL_VIS.TOPK_PREDS = 1 +# Colormap to for text boxes and bounding boxes colors +_C.TENSORBOARD.MODEL_VIS.COLORMAP = "Pastel2" +# Config for visualization video inputs with Grad-CAM. +# _C.TENSORBOARD.ENABLE must be True. +_C.TENSORBOARD.MODEL_VIS.GRAD_CAM = CfgNode() +# Whether to run visualization using Grad-CAM technique. +_C.TENSORBOARD.MODEL_VIS.GRAD_CAM.ENABLE = True +# CNN layers to use for Grad-CAM. The number of layers must be equal to +# number of pathway(s). +_C.TENSORBOARD.MODEL_VIS.GRAD_CAM.LAYER_LIST = [] +# If True, visualize Grad-CAM using true labels for each instances. +# If False, use the highest predicted class. 
+_C.TENSORBOARD.MODEL_VIS.GRAD_CAM.USE_TRUE_LABEL = False +# Colormap to for text boxes and bounding boxes colors +_C.TENSORBOARD.MODEL_VIS.GRAD_CAM.COLORMAP = "viridis" + +# Config for visualization for wrong prediction visualization. +# _C.TENSORBOARD.ENABLE must be True. +_C.TENSORBOARD.WRONG_PRED_VIS = CfgNode() +_C.TENSORBOARD.WRONG_PRED_VIS.ENABLE = False +# Folder tag to origanize model eval videos under. +_C.TENSORBOARD.WRONG_PRED_VIS.TAG = "Incorrectly classified videos." +# Subset of labels to visualize. Only wrong predictions with true labels +# within this subset is visualized. +_C.TENSORBOARD.WRONG_PRED_VIS.SUBSET_PATH = "" + + + +############### +_C.JITTER = CfgNode() + +_C.JITTER.ENABLE = False + +_C.JITTER.CONTINUS_METHODS=["blend_diff_person","blend_downsampled","blend_same_person"] +_C.JITTER.DISCONTINUS_METHODS=["light", "rotate", "skip"] + +_C.JITTER.STRONG_INNER_CLIP_MASK_JITTER= False + +# ---------------------------------------------------------------------------- # +# Demo options +# ---------------------------------------------------------------------------- # +_C.DEMO = CfgNode() + +# Run model in DEMO mode. +_C.DEMO.ENABLE = False + +# Path to a json file providing class_name - id mapping +# in the format {"class_name1": id1, "class_name2": id2, ...}. +_C.DEMO.LABEL_FILE_PATH = "" + +# Specify a camera device as input. This will be prioritized +# over input video if set. +# If -1, use input video instead. +_C.DEMO.WEBCAM = -1 + +# Path to input video for demo. +_C.DEMO.INPUT_VIDEO = "" +# Custom width for reading input video data. +_C.DEMO.DISPLAY_WIDTH = 0 +# Custom height for reading input video data. +_C.DEMO.DISPLAY_HEIGHT = 0 +# Path to Detectron2 object detection model configuration, +# only used for detection tasks. +_C.DEMO.DETECTRON2_CFG = "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml" +# Path to Detectron2 object detection model pre-trained weights. +_C.DEMO.DETECTRON2_WEIGHTS = "detectron2://COCO-Detection/faster_rcnn_R_50_FPN_3x/137849458/model_final_280758.pkl" +# Threshold for choosing predicted bounding boxes by Detectron2. +_C.DEMO.DETECTRON2_THRESH = 0.9 +# Number of overlapping frames between 2 consecutive clips. +# Increase this number for more frequent action predictions. +# The number of overlapping frames cannot be larger than +# half of the sequence length `cfg.DATA.NUM_FRAMES * cfg.DATA.SAMPLING_RATE` +_C.DEMO.BUFFER_SIZE = 0 +# If specified, the visualized outputs will be written this a video file of +# this path. Otherwise, the visualized outputs will be displayed in a window. +_C.DEMO.OUTPUT_FILE = "" +# Frames per second rate for writing to output video file. +# If not set (-1), use fps rate from input file. +_C.DEMO.OUTPUT_FPS = -1 +# Input format from demo video reader ("RGB" or "BGR"). +_C.DEMO.INPUT_FORMAT = "BGR" +# Draw visualization frames in [keyframe_idx - CLIP_VIS_SIZE, keyframe_idx + CLIP_VIS_SIZE] inclusively. +_C.DEMO.CLIP_VIS_SIZE = 10 +# Number of processes to run video visualizer. +_C.DEMO.NUM_VIS_INSTANCES = 2 + +# Path to pre-computed predicted boxes +_C.DEMO.PREDS_BOXES = "" +# Whether to run in with multi-threaded video reader. +_C.DEMO.THREAD_ENABLE = False +# Take one clip for every `DEMO.NUM_CLIPS_SKIP` + 1 for prediction and visualization. +# This is used for fast demo speed by reducing the prediction/visualiztion frequency. +# If -1, take the most recent read clip for visualization. This mode is only supported +# if `DEMO.THREAD_ENABLE` is set to True. 
+_C.DEMO.NUM_CLIPS_SKIP = 0 +# Path to ground-truth boxes and labels (optional) +_C.DEMO.GT_BOXES = "" +# The starting second of the video w.r.t bounding boxes file. +_C.DEMO.STARTING_SECOND = 900 +# Frames per second of the input video/folder of images. +_C.DEMO.FPS = 30 +# Visualize with top-k predictions or predictions above certain threshold(s). +# Option: {"thres", "top-k"} +_C.DEMO.VIS_MODE = "thres" +# Threshold for common class names. +_C.DEMO.COMMON_CLASS_THRES = 0.7 +# Theshold for uncommon class names. This will not be +# used if `_C.DEMO.COMMON_CLASS_NAMES` is empty. +_C.DEMO.UNCOMMON_CLASS_THRES = 0.3 +# This is chosen based on distribution of examples in +# each classes in AVA dataset. +_C.DEMO.COMMON_CLASS_NAMES = [ + "watch (a person)", + "talk to (e.g., self, a person, a group)", + "listen to (a person)", + "touch (an object)", + "carry/hold (an object)", + "walk", + "sit", + "lie/sleep", + "bend/bow (at the waist)", +] +# Slow-motion rate for the visualization. The visualized portions of the +# video will be played `_C.DEMO.SLOWMO` times slower than usual speed. +_C.DEMO.SLOWMO = 1 + +# Add custom config with default values. +custom_config.add_custom_config(_C) + + +def _assert_and_infer_cfg(cfg): + # BN assertions. + if cfg.BN.USE_PRECISE_STATS: + assert cfg.BN.NUM_BATCHES_PRECISE >= 0 + # TRAIN assertions. + assert cfg.TRAIN.CHECKPOINT_TYPE in ["pytorch", "caffe2"] + assert cfg.TRAIN.BATCH_SIZE % cfg.NUM_GPUS == 0 + + # TEST assertions. + assert cfg.TEST.CHECKPOINT_TYPE in ["pytorch", "caffe2"] + assert cfg.TEST.BATCH_SIZE % cfg.NUM_GPUS == 0 + assert cfg.TEST.NUM_SPATIAL_CROPS == 3 + + # RESNET assertions. + assert cfg.RESNET.NUM_GROUPS > 0 + assert cfg.RESNET.WIDTH_PER_GROUP > 0 + assert cfg.RESNET.WIDTH_PER_GROUP % cfg.RESNET.NUM_GROUPS == 0 + + # General assertions. + assert cfg.SHARD_ID < cfg.NUM_SHARDS + return cfg + + +def get_cfg(): + """ + Get a copy of the default config. + """ + return _assert_and_infer_cfg(_C.clone()) diff --git a/training/detectors/utils/slowfast/models/__init__.py b/training/detectors/utils/slowfast/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f82b3dc1e3af0dbabf4a3a6153e48b977eb1059e --- /dev/null +++ b/training/detectors/utils/slowfast/models/__init__.py @@ -0,0 +1,6 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +from .build import MODEL_REGISTRY, build_model # noqa +from .custom_video_model_builder import * # noqa +from .video_model_builder import ResNet, SlowFast # noqa \ No newline at end of file diff --git a/training/detectors/utils/slowfast/models/batchnorm_helper.py b/training/detectors/utils/slowfast/models/batchnorm_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..4e52d50497d9c0a58e5ace0a2fde94b5418ef563 --- /dev/null +++ b/training/detectors/utils/slowfast/models/batchnorm_helper.py @@ -0,0 +1,218 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""BatchNorm (BN) utility functions and custom batch-size BN implementations""" + +from functools import partial +import torch +import torch.distributed as dist +import torch.nn as nn +from torch.autograd.function import Function + +import slowfast.utils.distributed as du + + +def get_norm(cfg): + """ + Args: + cfg (CfgNode): model building configs, details are in the comments of + the config file. + Returns: + nn.Module: the normalization layer. 
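The defaults above are exposed through get_cfg(), which clones _C and runs the assertions in _assert_and_infer_cfg. As a minimal sketch of how a caller would override them, assuming the vendored tree is importable as `slowfast` (its own modules import `slowfast.utils.*`) and that the experiment YAML path below is purely hypothetical:

from slowfast.config.defaults import get_cfg          # assumed import path, mirroring upstream SlowFast

cfg = get_cfg()
cfg.merge_from_file("configs/SLOWFAST_8x8_R50.yaml")   # hypothetical experiment config
cfg.merge_from_list(["TENSORBOARD.ENABLE", True, "DEMO.ENABLE", False, "NUM_GPUS", 1])
print(cfg.TENSORBOARD.CONFUSION_MATRIX.FIGSIZE)        # [8, 8] unless overridden above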
+ """ + if cfg.BN.NORM_TYPE == "batchnorm": + return nn.BatchNorm3d + elif cfg.BN.NORM_TYPE == "sub_batchnorm": + return partial(SubBatchNorm3d, num_splits=cfg.BN.NUM_SPLITS) + elif cfg.BN.NORM_TYPE == "sync_batchnorm": + return partial( + NaiveSyncBatchNorm3d, num_sync_devices=cfg.BN.NUM_SYNC_DEVICES + ) + else: + raise NotImplementedError( + "Norm type {} is not supported".format(cfg.BN.NORM_TYPE) + ) + + +class SubBatchNorm3d(nn.Module): + """ + The standard BN layer computes stats across all examples in a GPU. In some + cases it is desirable to compute stats across only a subset of examples + (e.g., in multigrid training https://arxiv.org/abs/1912.00998). + SubBatchNorm3d splits the batch dimension into N splits, and run BN on + each of them separately (so that the stats are computed on each subset of + examples (1/N of batch) independently. During evaluation, it aggregates + the stats from all splits into one BN. + """ + + def __init__(self, num_splits, **args): + """ + Args: + num_splits (int): number of splits. + args (list): other arguments. + """ + super(SubBatchNorm3d, self).__init__() + self.num_splits = num_splits + num_features = args["num_features"] + # Keep only one set of weight and bias. + if args.get("affine", True): + self.affine = True + args["affine"] = False + self.weight = torch.nn.Parameter(torch.ones(num_features)) + self.bias = torch.nn.Parameter(torch.zeros(num_features)) + else: + self.affine = False + self.bn = nn.BatchNorm3d(**args) + args["num_features"] = num_features * num_splits + self.split_bn = nn.BatchNorm3d(**args) + + def _get_aggregated_mean_std(self, means, stds, n): + """ + Calculate the aggregated mean and stds. + Args: + means (tensor): mean values. + stds (tensor): standard deviations. + n (int): number of sets of means and stds. + """ + mean = means.view(n, -1).sum(0) / n + std = ( + stds.view(n, -1).sum(0) / n + + ((means.view(n, -1) - mean) ** 2).view(n, -1).sum(0) / n + ) + return mean.detach(), std.detach() + + def aggregate_stats(self): + """ + Synchronize running_mean, and running_var. Call this before eval. + """ + if self.split_bn.track_running_stats: + ( + self.bn.running_mean.data, + self.bn.running_var.data, + ) = self._get_aggregated_mean_std( + self.split_bn.running_mean, + self.split_bn.running_var, + self.num_splits, + ) + + def forward(self, x): + if self.training: + n, c, t, h, w = x.shape + x = x.view(n // self.num_splits, c * self.num_splits, t, h, w) + x = self.split_bn(x) + x = x.view(n, c, t, h, w) + else: + x = self.bn(x) + if self.affine: + x = x * self.weight.view((-1, 1, 1, 1)) + x = x + self.bias.view((-1, 1, 1, 1)) + return x + + +class GroupGather(Function): + """ + GroupGather performs all gather on each of the local process/ GPU groups. + """ + + @staticmethod + def forward(ctx, input, num_sync_devices, num_groups): + """ + Perform forwarding, gathering the stats across different process/ GPU + group. 
+ """ + ctx.num_sync_devices = num_sync_devices + ctx.num_groups = num_groups + + input_list = [ + torch.zeros_like(input) for k in range(du.get_local_size()) + ] + dist.all_gather( + input_list, input, async_op=False, group=du._LOCAL_PROCESS_GROUP + ) + + inputs = torch.stack(input_list, dim=0) + if num_groups > 1: + rank = du.get_local_rank() + group_idx = rank // num_sync_devices + inputs = inputs[ + group_idx + * num_sync_devices : (group_idx + 1) + * num_sync_devices + ] + inputs = torch.sum(inputs, dim=0) + return inputs + + @staticmethod + def backward(ctx, grad_output): + """ + Perform backwarding, gathering the gradients across different process/ GPU + group. + """ + grad_output_list = [ + torch.zeros_like(grad_output) for k in range(du.get_local_size()) + ] + dist.all_gather( + grad_output_list, + grad_output, + async_op=False, + group=du._LOCAL_PROCESS_GROUP, + ) + + grads = torch.stack(grad_output_list, dim=0) + if ctx.num_groups > 1: + rank = du.get_local_rank() + group_idx = rank // ctx.num_sync_devices + grads = grads[ + group_idx + * ctx.num_sync_devices : (group_idx + 1) + * ctx.num_sync_devices + ] + grads = torch.sum(grads, dim=0) + return grads, None, None + + +class NaiveSyncBatchNorm3d(nn.BatchNorm3d): + def __init__(self, num_sync_devices, **args): + """ + Naive version of Synchronized 3D BatchNorm. + Args: + num_sync_devices (int): number of device to sync. + args (list): other arguments. + """ + self.num_sync_devices = num_sync_devices + if self.num_sync_devices > 0: + assert du.get_local_size() % self.num_sync_devices == 0, ( + du.get_local_size(), + self.num_sync_devices, + ) + self.num_groups = du.get_local_size() // self.num_sync_devices + else: + self.num_sync_devices = du.get_local_size() + self.num_groups = 1 + super(NaiveSyncBatchNorm3d, self).__init__(**args) + + def forward(self, input): + if du.get_local_size() == 1 or not self.training: + return super().forward(input) + + assert input.shape[0] > 0, "SyncBatchNorm does not support empty inputs" + C = input.shape[1] + mean = torch.mean(input, dim=[0, 2, 3, 4]) + meansqr = torch.mean(input * input, dim=[0, 2, 3, 4]) + + vec = torch.cat([mean, meansqr], dim=0) + vec = GroupGather.apply(vec, self.num_sync_devices, self.num_groups) * ( + 1.0 / self.num_sync_devices + ) + + mean, meansqr = torch.split(vec, C) + var = meansqr - mean * mean + self.running_mean += self.momentum * (mean.detach() - self.running_mean) + self.running_var += self.momentum * (var.detach() - self.running_var) + + invstd = torch.rsqrt(var + self.eps) + scale = self.weight * invstd + bias = self.bias - mean * scale + scale = scale.reshape(1, -1, 1, 1, 1) + bias = bias.reshape(1, -1, 1, 1, 1) + return input * scale + bias diff --git a/training/detectors/utils/slowfast/models/build.py b/training/detectors/utils/slowfast/models/build.py new file mode 100644 index 0000000000000000000000000000000000000000..8dd9cca224b3e77bb8c3cd899489358737d4cd35 --- /dev/null +++ b/training/detectors/utils/slowfast/models/build.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Model construction functions.""" + +import torch +from fvcore.common.registry import Registry + +MODEL_REGISTRY = Registry("MODEL") +MODEL_REGISTRY.__doc__ = """ +Registry for video model. + +The registered object will be called with `obj(cfg)`. +The call should return a `torch.nn.Module` object. +""" + + +def build_model(cfg, gpu_id=None): + """ + Builds the video model. 
+ Args: + cfg (configs): configs that contains the hyper-parameters to build the + backbone. Details can be seen in slowfast/config/defaults.py. + gpu_id (Optional[int]): specify the gpu index to build model. + """ + if torch.cuda.is_available(): + assert ( + cfg.NUM_GPUS <= torch.cuda.device_count() + ), "Cannot use more GPU devices than available" + else: + assert ( + cfg.NUM_GPUS == 0 + ), "Cuda is not available. Please set `NUM_GPUS: 0 for running on CPUs." + + # Construct the model + name = cfg.MODEL.MODEL_NAME + model = MODEL_REGISTRY.get(name)(cfg) + + if cfg.NUM_GPUS: + if gpu_id is None: + # Determine the GPU used by the current process + cur_device = torch.cuda.current_device() + else: + cur_device = gpu_id + # Transfer the model to the current GPU device + model = model.cuda(device=cur_device) + # Use multi-process data parallel model in the multi-gpu setting + if cfg.NUM_GPUS > 1: + # Make model replica operate on the current device + model = torch.nn.parallel.DistributedDataParallel( + module=model, device_ids=[cur_device], output_device=cur_device,find_unused_parameters=True + ) + return model diff --git a/training/detectors/utils/slowfast/models/custom_video_model_builder.py b/training/detectors/utils/slowfast/models/custom_video_model_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..f261f67b95616b8582b10998a290611ee108b2a9 --- /dev/null +++ b/training/detectors/utils/slowfast/models/custom_video_model_builder.py @@ -0,0 +1,5 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + + +"""A More Flexible Video models.""" diff --git a/training/detectors/utils/slowfast/models/head_helper.py b/training/detectors/utils/slowfast/models/head_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..df04b010430b6000005676d52174243383873d05 --- /dev/null +++ b/training/detectors/utils/slowfast/models/head_helper.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""ResNe(X)t Head helper.""" + +import torch +import torch.nn as nn + +class ResNetBasicHead(nn.Module): + """ + ResNe(X)t 3D head. + This layer performs a fully-connected projection during training, when the + input size is 1x1x1. It performs a convolutional projection during testing + when the input size is larger than 1x1x1. If the inputs are from multiple + different pathways, the inputs will be concatenated after pooling. + """ + + def __init__( + self, + dim_in, + num_classes, + pool_size, + dropout_rate=0.0, + act_func="softmax", + ): + """ + The `__init__` method of any subclass should also contain these + arguments. + ResNetBasicHead takes p pathways as input where p in [1, infty]. + + Args: + dim_in (list): the list of channel dimensions of the p inputs to the + ResNetHead. + num_classes (int): the channel dimensions of the p outputs to the + ResNetHead. + pool_size (list): the list of kernel sizes of p spatial temporal + poolings, temporal pool kernel size, spatial pool kernel size, + spatial pool kernel size in order. + dropout_rate (float): dropout rate. If equal to 0.0, perform no + dropout. + act_func (string): activation function to use. 'softmax': applies + softmax on the output. 'sigmoid': applies sigmoid on the output. + """ + super(ResNetBasicHead, self).__init__() + assert ( + len({len(pool_size), len(dim_in)}) == 1 + ), "pathway dimensions are not consistent." 
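build_model above resolves the architecture by name through MODEL_REGISTRY and then wraps it for (multi-)GPU execution. A toy sketch of the fvcore registry pattern it relies on, using a separate registry and a hypothetical TinyNet so nothing in the vendored code is touched:

import torch
import torch.nn as nn
from fvcore.common.registry import Registry

DEMO_REGISTRY = Registry("DEMO_MODEL")                 # stand-in for MODEL_REGISTRY

@DEMO_REGISTRY.register()                              # registered under its class name, "TinyNet"
class TinyNet(nn.Module):
    def __init__(self, cfg):
        super().__init__()
        self.fc = nn.Linear(16, cfg["num_classes"])

    def forward(self, x):
        return self.fc(x)

model = DEMO_REGISTRY.get("TinyNet")({"num_classes": 2})   # what build_model does with cfg.MODEL.MODEL_NAME
out = model(torch.randn(4, 16))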
+ self.num_pathways = len(pool_size) + + for pathway in range(self.num_pathways): + if pool_size[pathway] is None: + avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1)) + else: + avg_pool = nn.AvgPool3d(pool_size[pathway], stride=1) + self.add_module("pathway{}_avgpool".format(pathway), avg_pool) + + if dropout_rate > 0.0: + self.dropout = nn.Dropout(dropout_rate) + # Perform FC in a fully convolutional manner. The FC layer will be + # initialized with a different std comparing to convolutional layers. + self.projection = nn.Linear(sum(dim_in), num_classes, bias=True) + + # Softmax for evaluation and testing. + if act_func == "softmax": + self.act = nn.Softmax(dim=4) + elif act_func == "sigmoid": + self.act = nn.Sigmoid() + else: + raise NotImplementedError( + "{} is not supported as an activation" + "function.".format(act_func) + ) + + def forward(self, inputs): + assert ( + len(inputs) == self.num_pathways + ), "Input tensor does not contain {} pathway".format(self.num_pathways) + pool_out = [] + for pathway in range(self.num_pathways): + m = getattr(self, "pathway{}_avgpool".format(pathway)) + pool_out.append(m(inputs[pathway])) + x = torch.cat(pool_out, 1) + # (N, C, T, H, W) -> (N, T, H, W, C). + x = x.permute((0, 2, 3, 4, 1)) + # Perform dropout. + if hasattr(self, "dropout"): + x = self.dropout(x) + x = self.projection(x) + + # Performs fully convlutional inference. + # if not self.training: + # x = x.mean([1, 2, 3]) + x = self.act(x) + x = x.view(x.shape[0], -1) + return x diff --git a/training/detectors/utils/slowfast/models/losses.py b/training/detectors/utils/slowfast/models/losses.py new file mode 100644 index 0000000000000000000000000000000000000000..7dda4eb19b2cf76275ba1778dc5f8730058a6c31 --- /dev/null +++ b/training/detectors/utils/slowfast/models/losses.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Loss functions.""" + +import torch.nn as nn + +_LOSSES = { + "cross_entropy": nn.CrossEntropyLoss, + "bce": nn.BCELoss, + "bce_logit": nn.BCEWithLogitsLoss, +} + + +def get_loss_func(loss_name): + """ + Retrieve the loss given the loss name. + Args (int): + loss_name: the name of the loss to use. + """ + if loss_name not in _LOSSES.keys(): + raise NotImplementedError("Loss {} is not supported".format(loss_name)) + return _LOSSES[loss_name] diff --git a/training/detectors/utils/slowfast/models/nonlocal_helper.py b/training/detectors/utils/slowfast/models/nonlocal_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..6e68d05817256a66d0b6ecf0f96292446cf41270 --- /dev/null +++ b/training/detectors/utils/slowfast/models/nonlocal_helper.py @@ -0,0 +1,148 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Non-local helper""" + +import torch +import torch.nn as nn + + +class Nonlocal(nn.Module): + """ + Builds Non-local Neural Networks as a generic family of building + blocks for capturing long-range dependencies. Non-local Network + computes the response at a position as a weighted sum of the + features at all positions. This building block can be plugged into + many computer vision architectures. 
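Note that get_loss_func in losses.py returns the loss class rather than an instance, so callers instantiate it with their own keyword arguments. A short sketch of that calling convention (re-stating the lookup table locally rather than importing the vendored module, whose package __init__ pulls in the rest of slowfast):

import torch
import torch.nn as nn

_LOSSES = {"cross_entropy": nn.CrossEntropyLoss, "bce_logit": nn.BCEWithLogitsLoss}

loss_fun = _LOSSES["cross_entropy"](reduction="mean")  # instantiate the returned class
logits = torch.randn(4, 2)
labels = torch.randint(0, 2, (4,))
loss = loss_fun(logits, labels)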
+ More details in the paper: https://arxiv.org/pdf/1711.07971.pdf + """ + + def __init__( + self, + dim, + dim_inner, + pool_size=None, + instantiation="softmax", + zero_init_final_conv=False, + zero_init_final_norm=True, + norm_eps=1e-5, + norm_momentum=0.1, + norm_module=nn.BatchNorm3d, + ): + """ + Args: + dim (int): number of dimension for the input. + dim_inner (int): number of dimension inside of the Non-local block. + pool_size (list): the kernel size of spatial temporal pooling, + temporal pool kernel size, spatial pool kernel size, spatial + pool kernel size in order. By default pool_size is None, + then there would be no pooling used. + instantiation (string): supports two different instantiation method: + "dot_product": normalizing correlation matrix with L2. + "softmax": normalizing correlation matrix with Softmax. + zero_init_final_conv (bool): If true, zero initializing the final + convolution of the Non-local block. + zero_init_final_norm (bool): + If true, zero initializing the final batch norm of the Non-local + block. + norm_module (nn.Module): nn.Module for the normalization layer. The + default is nn.BatchNorm3d. + """ + super(Nonlocal, self).__init__() + self.dim = dim + self.dim_inner = dim_inner + self.pool_size = pool_size + self.instantiation = instantiation + self.use_pool = ( + False + if pool_size is None + else any((size > 1 for size in pool_size)) + ) + self.norm_eps = norm_eps + self.norm_momentum = norm_momentum + self._construct_nonlocal( + zero_init_final_conv, zero_init_final_norm, norm_module + ) + + def _construct_nonlocal( + self, zero_init_final_conv, zero_init_final_norm, norm_module + ): + # Three convolution heads: theta, phi, and g. + self.conv_theta = nn.Conv3d( + self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0 + ) + self.conv_phi = nn.Conv3d( + self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0 + ) + self.conv_g = nn.Conv3d( + self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0 + ) + + # Final convolution output. + self.conv_out = nn.Conv3d( + self.dim_inner, self.dim, kernel_size=1, stride=1, padding=0 + ) + # Zero initializing the final convolution output. + self.conv_out.zero_init = zero_init_final_conv + + # TODO: change the name to `norm` + self.bn = norm_module( + num_features=self.dim, + eps=self.norm_eps, + momentum=self.norm_momentum, + ) + # Zero initializing the final bn. + self.bn.transform_final_bn = zero_init_final_norm + + # Optional to add the spatial-temporal pooling. + if self.use_pool: + self.pool = nn.MaxPool3d( + kernel_size=self.pool_size, + stride=self.pool_size, + padding=[0, 0, 0], + ) + + def forward(self, x): + x_identity = x + N, C, T, H, W = x.size() + + theta = self.conv_theta(x) + + # Perform temporal-spatial pooling to reduce the computation. + if self.use_pool: + x = self.pool(x) + + phi = self.conv_phi(x) + g = self.conv_g(x) + + theta = theta.view(N, self.dim_inner, -1) + phi = phi.view(N, self.dim_inner, -1) + g = g.view(N, self.dim_inner, -1) + + # (N, C, TxHxW) * (N, C, TxHxW) => (N, TxHxW, TxHxW). + theta_phi = torch.einsum("nct,ncp->ntp", (theta, phi)) + # For original Non-local paper, there are two main ways to normalize + # the affinity tensor: + # 1) Softmax normalization (norm on exp). + # 2) dot_product normalization. + if self.instantiation == "softmax": + # Normalizing the affinity tensor theta_phi before softmax. 
+ theta_phi = theta_phi * (self.dim_inner ** -0.5) + theta_phi = nn.functional.softmax(theta_phi, dim=2) + elif self.instantiation == "dot_product": + spatial_temporal_dim = theta_phi.shape[2] + theta_phi = theta_phi / spatial_temporal_dim + else: + raise NotImplementedError( + "Unknown norm type {}".format(self.instantiation) + ) + + # (N, TxHxW, TxHxW) * (N, C, TxHxW) => (N, C, TxHxW). + theta_phi_g = torch.einsum("ntg,ncg->nct", (theta_phi, g)) + + # (N, C, TxHxW) => (N, C, T, H, W). + theta_phi_g = theta_phi_g.view(N, self.dim_inner, T, H, W) + + p = self.conv_out(theta_phi_g) + p = self.bn(p) + return x_identity + p diff --git a/training/detectors/utils/slowfast/models/optimizer.py b/training/detectors/utils/slowfast/models/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..130f2cebf994741bc45a6519f07c5f0740c106ac --- /dev/null +++ b/training/detectors/utils/slowfast/models/optimizer.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Optimizer.""" + +import torch + +import slowfast.utils.lr_policy as lr_policy + + +def construct_optimizer(model, cfg): + """ + Construct a stochastic gradient descent or ADAM optimizer with momentum. + Details can be found in: + Herbert Robbins, and Sutton Monro. "A stochastic approximation method." + and + Diederik P.Kingma, and Jimmy Ba. + "Adam: A Method for Stochastic Optimization." + + Args: + model (model): model to perform stochastic gradient descent + optimization or ADAM optimization. + cfg (config): configs of hyper-parameters of SGD or ADAM, includes base + learning rate, momentum, weight_decay, dampening, and etc. + """ + # Batchnorm parameters. + bn_params = [] + # Non-batchnorm parameters. + non_bn_parameters = [] + for name, p in model.named_parameters(): + if "bn" in name: + bn_params.append(p) + else: + non_bn_parameters.append(p) + # Apply different weight decay to Batchnorm and non-batchnorm parameters. + # In Caffe2 classification codebase the weight decay for batchnorm is 0.0. + # Having a different weight decay on batchnorm might cause a performance + # drop. + optim_params = [ + {"params": bn_params, "weight_decay": cfg.BN.WEIGHT_DECAY}, + {"params": non_bn_parameters, "weight_decay": cfg.SOLVER.WEIGHT_DECAY}, + ] + # Check all parameters will be passed into optimizer. + assert len(list(model.parameters())) == len(non_bn_parameters) + len( + bn_params + ), "parameter size does not match: {} + {} != {}".format( + len(non_bn_parameters), len(bn_params), len(list(model.parameters())) + ) + + if cfg.SOLVER.OPTIMIZING_METHOD == "sgd": + return torch.optim.SGD( + optim_params, + lr=cfg.SOLVER.BASE_LR, + momentum=cfg.SOLVER.MOMENTUM, + weight_decay=cfg.SOLVER.WEIGHT_DECAY, + dampening=cfg.SOLVER.DAMPENING, + nesterov=cfg.SOLVER.NESTEROV, + ) + elif cfg.SOLVER.OPTIMIZING_METHOD == "adam": + return torch.optim.Adam( + optim_params, + lr=cfg.SOLVER.BASE_LR, + betas=(0.9, 0.999), + weight_decay=cfg.SOLVER.WEIGHT_DECAY, + ) + else: + raise NotImplementedError( + "Does not support {} optimizer".format(cfg.SOLVER.OPTIMIZING_METHOD) + ) + + +def get_epoch_lr(cur_epoch, cfg): + """ + Retrieves the lr for the given epoch (as specified by the lr policy). + Args: + cfg (config): configs of hyper-parameters of ADAM, includes base + learning rate, betas, and weight decays. + cur_epoch (float): the number of epoch of the current training stage. 
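The core of Nonlocal.forward above is two einsums over flattened space-time positions, with the softmax instantiation scaling the affinities by dim_inner ** -0.5. A standalone sketch of that computation with hypothetical shapes:

import torch

N, C_inner, T, H, W = 2, 16, 4, 8, 8
theta = torch.randn(N, C_inner, T * H * W)
phi = torch.randn(N, C_inner, T * H * W)
g = torch.randn(N, C_inner, T * H * W)

affinity = torch.einsum("nct,ncp->ntp", theta, phi) * (C_inner ** -0.5)
affinity = affinity.softmax(dim=2)                     # each position attends over all T*H*W positions
out = torch.einsum("ntg,ncg->nct", affinity, g).reshape(N, C_inner, T, H, W)
# The real block then applies conv_out and BN and adds the result to the identity path.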
+ """ + return lr_policy.get_lr_at_epoch(cfg, cur_epoch) + +def get_iter_lr(cur_iter, cfg): + """ + Retrieves the lr for the given iter (as specified by the lr policy). + Args: + cfg (config): configs of hyper-parameters of ADAM, includes base + learning rate, betas, and weight decays. + cur_epoch (float): the number of epoch of the current training stage. + """ + lr=lr_policy.get_lr_at_iter(cfg, cur_iter) + + return lr + + +def set_lr(optimizer, new_lr): + """ + Sets the optimizer lr to the specified value. + Args: + optimizer (optim): the optimizer using to optimize the current network. + new_lr (float): the new learning rate to set. + """ + for param_group in optimizer.param_groups: + param_group["lr"] = new_lr diff --git a/training/detectors/utils/slowfast/models/resnet_helper.py b/training/detectors/utils/slowfast/models/resnet_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..0005409707586ceaac71a50fdf07fadd6c9ffb6b --- /dev/null +++ b/training/detectors/utils/slowfast/models/resnet_helper.py @@ -0,0 +1,647 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Video models.""" + +import torch.nn as nn + +from slowfast.models.nonlocal_helper import Nonlocal + + +def get_trans_func(name): + """ + Retrieves the transformation module by name. + """ + trans_funcs = { + "bottleneck_transform": BottleneckTransform, + "basic_transform": BasicTransform, + "temporal_transform":TemporalTransform + } + assert ( + name in trans_funcs.keys() + ), "Transformation function '{}' not supported".format(name) + return trans_funcs[name] + + +class BasicTransform(nn.Module): + """ + Basic transformation: Tx3x3, 1x3x3, where T is the size of temporal kernel. + """ + + def __init__( + self, + dim_in, + dim_out, + temp_kernel_size, + stride, + dim_inner=None, + num_groups=1, + stride_1x1=None, + inplace_relu=True, + eps=1e-5, + bn_mmt=0.1, + norm_module=nn.BatchNorm3d, + ): + """ + Args: + dim_in (int): the channel dimensions of the input. + dim_out (int): the channel dimension of the output. + temp_kernel_size (int): the temporal kernel sizes of the first + convolution in the basic block. + stride (int): the stride of the bottleneck. + dim_inner (None): the inner dimension would not be used in + BasicTransform. + num_groups (int): number of groups for the convolution. Number of + group is always 1 for BasicTransform. + stride_1x1 (None): stride_1x1 will not be used in BasicTransform. + inplace_relu (bool): if True, calculate the relu on the original + input without allocating new memory. + eps (float): epsilon for batch norm. + bn_mmt (float): momentum for batch norm. Noted that BN momentum in + PyTorch = 1 - BN momentum in Caffe2. + norm_module (nn.Module): nn.Module for the normalization layer. The + default is nn.BatchNorm3d. + """ + super(BasicTransform, self).__init__() + self.temp_kernel_size = temp_kernel_size + self._inplace_relu = inplace_relu + self._eps = eps + self._bn_mmt = bn_mmt + self._construct(dim_in, dim_out, stride, norm_module) + + def _construct(self, dim_in, dim_out, stride, norm_module): + # Tx3x3, BN, ReLU. + self.a = nn.Conv3d( + dim_in, + dim_out, + kernel_size=[self.temp_kernel_size, 3, 3], + stride=[1, stride, stride], + padding=[int(self.temp_kernel_size // 2), 1, 1], + bias=False, + ) + self.a_bn = norm_module( + num_features=dim_out, eps=self._eps, momentum=self._bn_mmt + ) + self.a_relu = nn.ReLU(inplace=self._inplace_relu) + # 1x3x3, BN. 
+ self.b = nn.Conv3d( + dim_out, + dim_out, + kernel_size=[1, 3, 3], + stride=[1, 1, 1], + padding=[0, 1, 1], + bias=False, + ) + self.b_bn = norm_module( + num_features=dim_out, eps=self._eps, momentum=self._bn_mmt + ) + + self.b_bn.transform_final_bn = True + + def forward(self, x): + x = self.a(x) + x = self.a_bn(x) + x = self.a_relu(x) + + x = self.b(x) + x = self.b_bn(x) + return x + +class TemporalTransform(nn.Module): + """ + Basic transformation: Tx3x3, 1x3x3, where T is the size of temporal kernel. + """ + + def __init__( + self, + dim_in, + dim_out, + temp_kernel_size, + stride, + dim_inner=None, + num_groups=1, + stride_1x1=None, + inplace_relu=True, + eps=1e-5, + bn_mmt=0.1, + norm_module=nn.BatchNorm3d, + dilation=1 + ): + """ + Args: + dim_in (int): the channel dimensions of the input. + dim_out (int): the channel dimension of the output. + temp_kernel_size (int): the temporal kernel sizes of the first + convolution in the basic block. + stride (int): the stride of the bottleneck. + dim_inner (None): the inner dimension would not be used in + BasicTransform. + num_groups (int): number of groups for the convolution. Number of + group is always 1 for BasicTransform. + stride_1x1 (None): stride_1x1 will not be used in BasicTransform. + inplace_relu (bool): if True, calculate the relu on the original + input without allocating new memory. + eps (float): epsilon for batch norm. + bn_mmt (float): momentum for batch norm. Noted that BN momentum in + PyTorch = 1 - BN momentum in Caffe2. + norm_module (nn.Module): nn.Module for the normalization layer. The + default is nn.BatchNorm3d. + """ + super(TemporalTransform, self).__init__() + self.temp_kernel_size = temp_kernel_size + self._inplace_relu = inplace_relu + self._eps = eps + self._bn_mmt = bn_mmt + self._construct(dim_in, dim_out, stride, norm_module) + + def _construct(self, dim_in, dim_out, stride, norm_module): + # Tx3x3, BN, ReLU. + self.a = nn.Conv3d( + dim_in, + dim_out, + kernel_size=[self.temp_kernel_size, 3, 3], + stride=[1, stride, stride], + padding=[int(self.temp_kernel_size // 2), 1, 1], + bias=False, + ) + self.a_bn = norm_module( + num_features=dim_out, eps=self._eps, momentum=self._bn_mmt + ) + self.a_relu = nn.ReLU(inplace=self._inplace_relu) + # 1x3x3, BN. + self.b = nn.Conv3d( + dim_out, + dim_out, + kernel_size=[1, 3, 3], + stride=[1, 1, 1], + padding=[0, 1, 1], + bias=False, + ) + self.b_bn = norm_module( + num_features=dim_out, eps=self._eps, momentum=self._bn_mmt + ) + + self.b_bn.transform_final_bn = True + + def forward(self, x): + x = self.a(x) + x = self.a_bn(x) + x = self.a_relu(x) + + x = self.b(x) + x = self.b_bn(x) + return x + + +class BottleneckTransform(nn.Module): + """ + Bottleneck transformation: Tx1x1, 1x3x3, 1x1x1, where T is the size of + temporal kernel. + """ + + def __init__( + self, + dim_in, + dim_out, + temp_kernel_size, + stride, + dim_inner, + num_groups, + stride_1x1=False, + inplace_relu=True, + eps=1e-5, + bn_mmt=0.1, + dilation=1, + norm_module=nn.BatchNorm3d, + ): + """ + Args: + dim_in (int): the channel dimensions of the input. + dim_out (int): the channel dimension of the output. + temp_kernel_size (int): the temporal kernel sizes of the first + convolution in the bottleneck. + stride (int): the stride of the bottleneck. + dim_inner (int): the inner dimension of the block. + num_groups (int): number of groups for the convolution. num_groups=1 + is for standard ResNet like networks, and num_groups>1 is for + ResNeXt like networks. 
+ stride_1x1 (bool): if True, apply stride to 1x1 conv, otherwise + apply stride to the 3x3 conv. + inplace_relu (bool): if True, calculate the relu on the original + input without allocating new memory. + eps (float): epsilon for batch norm. + bn_mmt (float): momentum for batch norm. Noted that BN momentum in + PyTorch = 1 - BN momentum in Caffe2. + dilation (int): size of dilation. + norm_module (nn.Module): nn.Module for the normalization layer. The + default is nn.BatchNorm3d. + """ + super(BottleneckTransform, self).__init__() + self.temp_kernel_size = temp_kernel_size + self._inplace_relu = inplace_relu + self._eps = eps + self._bn_mmt = bn_mmt + self._stride_1x1 = stride_1x1 + self._construct( + dim_in, + dim_out, + stride, + dim_inner, + num_groups, + dilation, + norm_module, + ) + + def _construct( + self, + dim_in, + dim_out, + stride, + dim_inner, + num_groups, + dilation, + norm_module, + ): + (str1x1, str3x3) = (stride, 1) if self._stride_1x1 else (1, stride) + + # Tx1x1, BN, ReLU. + self.a = nn.Conv3d( + dim_in, + dim_inner, + kernel_size=[self.temp_kernel_size, 1, 1], + stride=[1, str1x1, str1x1], + padding=[int(self.temp_kernel_size // 2), 0, 0], + bias=False, + ) + self.a_bn = norm_module( + num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt + ) + self.a_relu = nn.ReLU(inplace=self._inplace_relu) + + # 1x3x3, BN, ReLU. + self.b = nn.Conv3d( + dim_inner, + dim_inner, + [1, 3, 3], + stride=[1, str3x3, str3x3], + padding=[0, dilation, dilation], + groups=num_groups, + bias=False, + dilation=[1, dilation, dilation], + ) + self.b_bn = norm_module( + num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt + ) + self.b_relu = nn.ReLU(inplace=self._inplace_relu) + + # 1x1x1, BN. + self.c = nn.Conv3d( + dim_inner, + dim_out, + kernel_size=[1, 1, 1], + stride=[1, 1, 1], + padding=[0, 0, 0], + bias=False, + ) + self.c_bn = norm_module( + num_features=dim_out, eps=self._eps, momentum=self._bn_mmt + ) + self.c_bn.transform_final_bn = True + + def forward(self, x): + # Explicitly forward every layer. + # Branch2a. + x = self.a(x) + x = self.a_bn(x) + x = self.a_relu(x) + + # Branch2b. + x = self.b(x) + x = self.b_bn(x) + x = self.b_relu(x) + + # Branch2c + x = self.c(x) + x = self.c_bn(x) + return x + + +class ResBlock(nn.Module): + """ + Residual block. + """ + + def __init__( + self, + dim_in, + dim_out, + temp_kernel_size, + stride, + trans_func, + dim_inner, + num_groups=1, + stride_1x1=False, + inplace_relu=True, + eps=1e-5, + bn_mmt=0.1, + dilation=1, + norm_module=nn.BatchNorm3d, + ): + """ + ResBlock class constructs redisual blocks. More details can be found in: + Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. + "Deep residual learning for image recognition." + https://arxiv.org/abs/1512.03385 + Args: + dim_in (int): the channel dimensions of the input. + dim_out (int): the channel dimension of the output. + temp_kernel_size (int): the temporal kernel sizes of the middle + convolution in the bottleneck. + stride (int): the stride of the bottleneck. + trans_func (string): transform function to be used to construct the + bottleneck. + dim_inner (int): the inner dimension of the block. + num_groups (int): number of groups for the convolution. num_groups=1 + is for standard ResNet like networks, and num_groups>1 is for + ResNeXt like networks. + stride_1x1 (bool): if True, apply stride to 1x1 conv, otherwise + apply stride to the 3x3 conv. + inplace_relu (bool): calculate the relu on the original input + without allocating new memory. 
+ eps (float): epsilon for batch norm. + bn_mmt (float): momentum for batch norm. Noted that BN momentum in + PyTorch = 1 - BN momentum in Caffe2. + dilation (int): size of dilation. + norm_module (nn.Module): nn.Module for the normalization layer. The + default is nn.BatchNorm3d. + """ + super(ResBlock, self).__init__() + self._inplace_relu = inplace_relu + self._eps = eps + self._bn_mmt = bn_mmt + self._construct( + dim_in, + dim_out, + temp_kernel_size, + stride, + trans_func, + dim_inner, + num_groups, + stride_1x1, + inplace_relu, + dilation, + norm_module, + ) + + def _construct( + self, + dim_in, + dim_out, + temp_kernel_size, + stride, + trans_func, + dim_inner, + num_groups, + stride_1x1, + inplace_relu, + dilation, + norm_module, + ): + # Use skip connection with projection if dim or res change. + if (dim_in != dim_out) or (stride != 1): + self.branch1 = nn.Conv3d( + dim_in, + dim_out, + kernel_size=1, + stride=[1, stride, stride], + padding=0, + bias=False, + dilation=1, + ) + self.branch1_bn = norm_module( + num_features=dim_out, eps=self._eps, momentum=self._bn_mmt + ) + self.branch2 = trans_func( + dim_in, + dim_out, + temp_kernel_size, + stride, + dim_inner, + num_groups, + stride_1x1=stride_1x1, + inplace_relu=inplace_relu, + dilation=dilation, + norm_module=norm_module, + ) + self.relu = nn.ReLU(self._inplace_relu) + + def forward(self, x): + if hasattr(self, "branch1"): + x = self.branch1_bn(self.branch1(x)) + self.branch2(x) + else: + x = x + self.branch2(x) + x = self.relu(x) + return x + + +class ResStage(nn.Module): + """ + Stage of 3D ResNet. It expects to have one or more tensors as input for + single pathway (C2D, I3D, Slow), and multi-pathway (SlowFast) cases. + More details can be found here: + + Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. + "SlowFast networks for video recognition." + https://arxiv.org/pdf/1812.03982.pdf + """ + + def __init__( + self, + dim_in, + dim_out, + stride, + temp_kernel_sizes, + num_blocks, + dim_inner, + num_groups, + num_block_temp_kernel, + nonlocal_inds, + nonlocal_group, + nonlocal_pool, + dilation, + instantiation="softmax", + trans_func_name="bottleneck_transform", + stride_1x1=False, + inplace_relu=True, + norm_module=nn.BatchNorm3d, + ): + """ + The `__init__` method of any subclass should also contain these arguments. + ResStage builds p streams, where p can be greater or equal to one. + Args: + dim_in (list): list of p the channel dimensions of the input. + Different channel dimensions control the input dimension of + different pathways. + dim_out (list): list of p the channel dimensions of the output. + Different channel dimensions control the input dimension of + different pathways. + temp_kernel_sizes (list): list of the p temporal kernel sizes of the + convolution in the bottleneck. Different temp_kernel_sizes + control different pathway. + stride (list): list of the p strides of the bottleneck. Different + stride control different pathway. + num_blocks (list): list of p numbers of blocks for each of the + pathway. + dim_inner (list): list of the p inner channel dimensions of the + input. Different channel dimensions control the input dimension + of different pathways. + num_groups (list): list of number of p groups for the convolution. + num_groups=1 is for standard ResNet like networks, and + num_groups>1 is for ResNeXt like networks. + num_block_temp_kernel (list): extent the temp_kernel_sizes to + num_block_temp_kernel blocks, then fill temporal kernel size + of 1 for the rest of the layers. 
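The num_block_temp_kernel behaviour described here is implemented in ResStage.__init__ by repeating the per-pathway kernel basis, truncating it to num_block_temp_kernel blocks, and padding with temporal size 1. A worked example with hypothetical values:

temp_kernel_sizes = [[3, 1]]          # one pathway, e.g. an I3D-style stage basis
num_blocks = [6]
num_block_temp_kernel = [4]
expanded = [
    (temp_kernel_sizes[i] * num_blocks[i])[: num_block_temp_kernel[i]]
    + [1] * (num_blocks[i] - num_block_temp_kernel[i])
    for i in range(len(temp_kernel_sizes))
]
# expanded == [[3, 1, 3, 1, 1, 1]]: the first 4 blocks cycle through the basis,
# the remaining 2 blocks fall back to a temporal kernel size of 1.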
+ nonlocal_inds (list): If the tuple is empty, no nonlocal layer will + be added. If the tuple is not empty, add nonlocal layers after + the index-th block. + dilation (list): size of dilation for each pathway. + nonlocal_group (list): list of number of p nonlocal groups. Each + number controls how to fold temporal dimension to batch + dimension before applying nonlocal transformation. + https://github.com/facebookresearch/video-nonlocal-net. + instantiation (string): different instantiation for nonlocal layer. + Supports two different instantiation method: + "dot_product": normalizing correlation matrix with L2. + "softmax": normalizing correlation matrix with Softmax. + trans_func_name (string): name of the the transformation function apply + on the network. + norm_module (nn.Module): nn.Module for the normalization layer. The + default is nn.BatchNorm3d. + """ + super(ResStage, self).__init__() + assert all( + ( + num_block_temp_kernel[i] <= num_blocks[i] + for i in range(len(temp_kernel_sizes)) + ) + ) + self.num_blocks = num_blocks + self.nonlocal_group = nonlocal_group + self.temp_kernel_sizes = [ + (temp_kernel_sizes[i] * num_blocks[i])[: num_block_temp_kernel[i]] + + [1] * (num_blocks[i] - num_block_temp_kernel[i]) + for i in range(len(temp_kernel_sizes)) + ] + assert ( + len( + { + len(dim_in), + len(dim_out), + len(temp_kernel_sizes), + len(stride), + len(num_blocks), + len(dim_inner), + len(num_groups), + len(num_block_temp_kernel), + len(nonlocal_inds), + len(nonlocal_group), + } + ) + == 1 + ) + self.num_pathways = len(self.num_blocks) + self._construct( + dim_in, + dim_out, + stride, + dim_inner, + num_groups, + trans_func_name, + stride_1x1, + inplace_relu, + nonlocal_inds, + nonlocal_pool, + instantiation, + dilation, + norm_module, + ) + + def _construct( + self, + dim_in, + dim_out, + stride, + dim_inner, + num_groups, + trans_func_name, + stride_1x1, + inplace_relu, + nonlocal_inds, + nonlocal_pool, + instantiation, + dilation, + norm_module, + ): + for pathway in range(self.num_pathways): + for i in range(self.num_blocks[pathway]): + # Retrieve the transformation function. + trans_func = get_trans_func(trans_func_name) + # Construct the block. + res_block = ResBlock( + dim_in[pathway] if i == 0 else dim_out[pathway], + dim_out[pathway], + self.temp_kernel_sizes[pathway][i], + stride[pathway] if i == 0 else 1, + trans_func, + dim_inner[pathway], + num_groups[pathway], + stride_1x1=stride_1x1, + inplace_relu=inplace_relu, + dilation=dilation[pathway], + norm_module=norm_module, + ) + self.add_module("pathway{}_res{}".format(pathway, i), res_block) + if i in nonlocal_inds[pathway]: + nln = Nonlocal( + dim_out[pathway], + dim_out[pathway] // 2, + nonlocal_pool[pathway], + instantiation=instantiation, + norm_module=norm_module, + ) + self.add_module( + "pathway{}_nonlocal{}".format(pathway, i), nln + ) + + def forward(self, inputs): + output = [] + for pathway in range(self.num_pathways): + x = inputs[pathway] + for i in range(self.num_blocks[pathway]): + m = getattr(self, "pathway{}_res{}".format(pathway, i)) + x = m(x) + if hasattr(self, "pathway{}_nonlocal{}".format(pathway, i)): + nln = getattr( + self, "pathway{}_nonlocal{}".format(pathway, i) + ) + b, c, t, h, w = x.shape + if self.nonlocal_group[pathway] > 1: + # Fold temporal dimension into batch dimension. 
+ x = x.permute(0, 2, 1, 3, 4) + x = x.reshape( + b * self.nonlocal_group[pathway], + t // self.nonlocal_group[pathway], + c, + h, + w, + ) + x = x.permute(0, 2, 1, 3, 4) + x = nln(x) + if self.nonlocal_group[pathway] > 1: + # Fold back to temporal dimension. + x = x.permute(0, 2, 1, 3, 4) + x = x.reshape(b, t, c, h, w) + x = x.permute(0, 2, 1, 3, 4) + output.append(x) + + return output diff --git a/training/detectors/utils/slowfast/models/stem_helper.py b/training/detectors/utils/slowfast/models/stem_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..481977b15a13edf54bfdb17fd3627b6657d56262 --- /dev/null +++ b/training/detectors/utils/slowfast/models/stem_helper.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""ResNe(X)t 3D stem helper.""" + +import torch.nn as nn + + +class VideoModelStem(nn.Module): + """ + Video 3D stem module. Provides stem operations of Conv, BN, ReLU, MaxPool + on input data tensor for one or multiple pathways. + """ + + def __init__( + self, + dim_in, + dim_out, + kernel, + stride, + padding, + inplace_relu=True, + eps=1e-5, + bn_mmt=0.1, + norm_module=nn.BatchNorm3d, + ): + """ + The `__init__` method of any subclass should also contain these + arguments. List size of 1 for single pathway models (C2D, I3D, Slow + and etc), list size of 2 for two pathway models (SlowFast). + + Args: + dim_in (list): the list of channel dimensions of the inputs. + dim_out (list): the output dimension of the convolution in the stem + layer. + kernel (list): the kernels' size of the convolutions in the stem + layers. Temporal kernel size, height kernel size, width kernel + size in order. + stride (list): the stride sizes of the convolutions in the stem + layer. Temporal kernel stride, height kernel size, width kernel + size in order. + padding (list): the paddings' sizes of the convolutions in the stem + layer. Temporal padding size, height padding size, width padding + size in order. + inplace_relu (bool): calculate the relu on the original input + without allocating new memory. + eps (float): epsilon for batch norm. + bn_mmt (float): momentum for batch norm. Noted that BN momentum in + PyTorch = 1 - BN momentum in Caffe2. + norm_module (nn.Module): nn.Module for the normalization layer. The + default is nn.BatchNorm3d. + """ + super(VideoModelStem, self).__init__() + + assert ( + len( + { + len(dim_in), + len(dim_out), + len(kernel), + len(stride), + len(padding), + } + ) + == 1 + ), "Input pathway dimensions are not consistent." + self.num_pathways = len(dim_in) + self.kernel = kernel + self.stride = stride + self.padding = padding + self.inplace_relu = inplace_relu + self.eps = eps + self.bn_mmt = bn_mmt + # Construct the stem layer. + self._construct_stem(dim_in, dim_out, norm_module) + + def _construct_stem(self, dim_in, dim_out, norm_module): + for pathway in range(len(dim_in)): + stem = ResNetBasicStem( + dim_in[pathway], + dim_out[pathway], + self.kernel[pathway], + self.stride[pathway], + self.padding[pathway], + self.inplace_relu, + self.eps, + self.bn_mmt, + norm_module, + ) + self.add_module("pathway{}_stem".format(pathway), stem) + + def forward(self, x): + assert ( + len(x) == self.num_pathways + ), "Input tensor does not contain {} pathway".format(self.num_pathways) + for pathway in range(len(x)): + m = getattr(self, "pathway{}_stem".format(pathway)) + x[pathway] = m(x[pathway]) + return x + + +class ResNetBasicStem(nn.Module): + """ + ResNe(X)t 3D stem module. 
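When nonlocal_group is greater than 1, ResStage.forward above folds part of the temporal dimension into the batch dimension before the Non-local block and unfolds it afterwards, so attention is restricted to shorter temporal windows. A standalone sketch of that reshape with hypothetical sizes:

import torch

b, c, t, h, w = 2, 8, 8, 4, 4
group = 2                                              # a cfg.NONLOCAL.GROUP entry for this pathway
x = torch.randn(b, c, t, h, w)
x = x.permute(0, 2, 1, 3, 4)                           # (b, t, c, h, w)
x = x.reshape(b * group, t // group, c, h, w)          # fold: more "clips", each t // group frames long
x = x.permute(0, 2, 1, 3, 4)                           # back to channels-first for the Non-local block
assert x.shape == (b * group, c, t // group, h, w)
# The inverse permute/reshape after the Non-local call restores (b, c, t, h, w).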
+ Performs spatiotemporal Convolution, BN, and Relu following by a + spatiotemporal pooling. + """ + + def __init__( + self, + dim_in, + dim_out, + kernel, + stride, + padding, + inplace_relu=True, + eps=1e-5, + bn_mmt=0.1, + norm_module=nn.BatchNorm3d, + ): + """ + The `__init__` method of any subclass should also contain these arguments. + + Args: + dim_in (int): the channel dimension of the input. Normally 3 is used + for rgb input, and 2 or 3 is used for optical flow input. + dim_out (int): the output dimension of the convolution in the stem + layer. + kernel (list): the kernel size of the convolution in the stem layer. + temporal kernel size, height kernel size, width kernel size in + order. + stride (list): the stride size of the convolution in the stem layer. + temporal kernel stride, height kernel size, width kernel size in + order. + padding (int): the padding size of the convolution in the stem + layer, temporal padding size, height padding size, width + padding size in order. + inplace_relu (bool): calculate the relu on the original input + without allocating new memory. + eps (float): epsilon for batch norm. + bn_mmt (float): momentum for batch norm. Noted that BN momentum in + PyTorch = 1 - BN momentum in Caffe2. + norm_module (nn.Module): nn.Module for the normalization layer. The + default is nn.BatchNorm3d. + """ + super(ResNetBasicStem, self).__init__() + self.kernel = kernel + self.stride = stride + self.padding = padding + self.inplace_relu = inplace_relu + self.eps = eps + self.bn_mmt = bn_mmt + # Construct the stem layer. + self._construct_stem(dim_in, dim_out, norm_module) + + def _construct_stem(self, dim_in, dim_out, norm_module): + self.conv = nn.Conv3d( + dim_in, + dim_out, + self.kernel, + stride=self.stride, + padding=self.padding, + bias=False, + ) + self.bn = norm_module( + num_features=dim_out, eps=self.eps, momentum=self.bn_mmt + ) + self.relu = nn.ReLU(self.inplace_relu) + self.pool_layer = nn.MaxPool3d( + kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1] + ) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + x = self.pool_layer(x) + return x diff --git a/training/detectors/utils/slowfast/models/unet_helper.py b/training/detectors/utils/slowfast/models/unet_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..36b7202cd1936a433b193017f6c363e5dd317b4f --- /dev/null +++ b/training/detectors/utils/slowfast/models/unet_helper.py @@ -0,0 +1,157 @@ +InPlaceABN = None +from torch import nn +import torch.nn.functional as F + + +class Conv3dReLU(nn.Sequential): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + padding=0, + stride=1, + use_batchnorm=True, + ): + + if use_batchnorm == "inplace" and InPlaceABN is None: + raise RuntimeError( + "In order to use `use_batchnorm='inplace'` inplace_abn package must be installed. 
" + + "To install see: https://github.com/mapillary/inplace_abn" + ) + + conv = nn.Conv3d( + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=padding, + bias=not (use_batchnorm), + ) + relu = nn.ReLU(inplace=True) + + if use_batchnorm == "inplace": + bn = InPlaceABN(out_channels, activation="leaky_relu", activation_param=0.0) + relu = nn.Identity() + + elif use_batchnorm and use_batchnorm != "inplace": + bn = nn.BatchNorm3d(out_channels) + + else: + bn = nn.Identity() + + super(Conv3dReLU, self).__init__(conv, bn, relu) + + +class DecoderBlock(nn.Module): + def __init__( + self, in_channels, skip_channels, out_channels, use_batchnorm=True, + ): + super().__init__() + self.conv1 = Conv3dReLU( + in_channels + skip_channels, + out_channels, + kernel_size=3, + padding=1, + use_batchnorm=use_batchnorm, + ) + + self.conv2 = Conv3dReLU( + out_channels, + out_channels, + kernel_size=3, + padding=1, + use_batchnorm=use_batchnorm, + ) + + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + return x + + +class LightDecoderBlock(nn.Module): + def __init__( + self, in_channels, skip_channels, out_channels, use_batchnorm=True, + ): + super().__init__() + self.conv1 = Conv3dReLU( + in_channels + skip_channels, + out_channels, + kernel_size=3, + padding=1, + use_batchnorm=use_batchnorm, + ) + + def forward(self, x): + x = self.conv1(x) + return x + + +def freeze_net(model: nn.Module, freeze_prefixs): + flag = False + for name, param in model.named_parameters(): + items = name.split(".") + if items[0] == "module": + prefix = items[1] + else: + prefix = items[0] + if prefix in freeze_prefixs: + if param.requires_grad is True: + param.requires_grad = False + flag = True + # print("freeze",name) + + assert flag + + +def unfreeze_net(model: nn.Module): + for name, param in model.named_parameters(): + param.requires_grad = True + + +from .resnet_helper import ResBlock, get_trans_func + + +class ResDecoderBlock(nn.Module): + def __init__( + self, in_channels, skip_channels, out_channels, use_batchnorm=True, + ): + super().__init__() + trans_func = get_trans_func("bottleneck_transform") + self.conv1 = ResBlock( + in_channels + skip_channels, + out_channels, + 3, + 1, + trans_func, + out_channels//2, + num_groups=1, + stride_1x1=False, + inplace_relu=True, + eps=1e-5, + bn_mmt=0.1, + dilation=1, + norm_module=nn.BatchNorm3d, + ) + + self.conv2 = ResBlock( + out_channels, + out_channels, + 3, + 1, + trans_func, + out_channels//2, + num_groups=1, + stride_1x1=False, + inplace_relu=True, + eps=1e-5, + bn_mmt=0.1, + dilation=1, + norm_module=nn.BatchNorm3d, + ) + + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + return x diff --git a/training/detectors/utils/slowfast/models/video_model_builder.py b/training/detectors/utils/slowfast/models/video_model_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..3c89e8ebb8f51fe79b82c01542f0668345212726 --- /dev/null +++ b/training/detectors/utils/slowfast/models/video_model_builder.py @@ -0,0 +1,2739 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Video models.""" + +import torch +import torch.nn as nn +import copy + +import slowfast.utils.weight_init_helper as init_helper +from slowfast.models.batchnorm_helper import get_norm + +from . import head_helper, resnet_helper, stem_helper +from .build import MODEL_REGISTRY + +# Number of blocks for different stages given the model depth. 
+_MODEL_STAGE_DEPTH = {18:(2,2,2,2),50: (3, 4, 6, 3), 101: (3, 4, 23, 3)} + +# Basis of temporal kernel sizes for each of the stage. +_TEMPORAL_KERNEL_BASIS = { + "c2d": [ + [[1]], # conv1 temporal kernel. + [[1]], # res2 temporal kernel. + [[1]], # res3 temporal kernel. + [[1]], # res4 temporal kernel. + [[1]], # res5 temporal kernel. + ], + "c2d_nopool": [ + [[1]], # conv1 temporal kernel. + [[1]], # res2 temporal kernel. + [[1]], # res3 temporal kernel. + [[1]], # res4 temporal kernel. + [[1]], # res5 temporal kernel. + ], + "i3d": [ + [[5]], # conv1 temporal kernel. + [[3]], # res2 temporal kernel. + [[3, 1]], # res3 temporal kernel. + [[3, 1]], # res4 temporal kernel. + [[1, 3]], # res5 temporal kernel. + ], + "r3d_18": [ + [[3]], # conv1 temporal kernel. + [[3]], # res2 temporal kernel. + [[3, 1]], # res3 temporal kernel. + [[3, 1]], # res4 temporal kernel. + [[1, 3]], # res5 temporal kernel. + ], + "i3d_nopool": [ + [[5]], # conv1 temporal kernel. + [[3]], # res2 temporal kernel. + [[3, 1]], # res3 temporal kernel. + [[3, 1]], # res4 temporal kernel. + [[1, 3]], # res5 temporal kernel. + ], + "slow": [ + [[1]], # conv1 temporal kernel. + [[1]], # res2 temporal kernel. + [[1]], # res3 temporal kernel. + [[3]], # res4 temporal kernel. + [[3]], # res5 temporal kernel. + ], + "slowfast": [ + [[1], [5]], # conv1 temporal kernel for slow and fast pathway. + [[1], [3]], # res2 temporal kernel for slow and fast pathway. + [[1], [3]], # res3 temporal kernel for slow and fast pathway. + [[3], [3]], # res4 temporal kernel for slow and fast pathway. + [[3], [3]], # res5 temporal kernel for slow and fast pathway. + ], +} + +_POOL1 = { + "c2d": [[2, 1, 1]], + "c2d_nopool": [[1, 1, 1]], + "i3d": [[2, 1, 1]], + "r3d_18": [[2, 1, 1]], + "i3d_nopool": [[1, 1, 1]], + "slow": [[1, 1, 1]], + "slowfast": [[1, 1, 1], [1, 1, 1]], +} + + + + +class FuseFastToSlow(nn.Module): + """ + Fuses the information from the Fast pathway to the Slow pathway. Given the + tensors from Slow pathway and Fast pathway, fuse information from Fast to + Slow, then return the fused tensors from Slow and Fast pathway in order. + """ + + def __init__( + self, + dim_in, + fusion_conv_channel_ratio, + fusion_kernel, + alpha, + eps=1e-5, + bn_mmt=0.1, + inplace_relu=True, + norm_module=nn.BatchNorm3d, + ): + """ + Args: + dim_in (int): the channel dimension of the input. + fusion_conv_channel_ratio (int): channel ratio for the convolution + used to fuse from Fast pathway to Slow pathway. + fusion_kernel (int): kernel size of the convolution used to fuse + from Fast pathway to Slow pathway. + alpha (int): the frame rate ratio between the Fast and Slow pathway. + eps (float): epsilon for batch norm. + bn_mmt (float): momentum for batch norm. Noted that BN momentum in + PyTorch = 1 - BN momentum in Caffe2. + inplace_relu (bool): if True, calculate the relu on the original + input without allocating new memory. + norm_module (nn.Module): nn.Module for the normalization layer. The + default is nn.BatchNorm3d. 
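FuseFastToSlow, defined next, is the lateral connection of SlowFast: a time-strided 3D convolution compresses the Fast pathway (which carries ALPHA times more frames) to the Slow pathway's temporal length, and the result is concatenated onto the Slow features along the channel dimension. A shape-level sketch with hypothetical sizes (the BN and ReLU the module applies before the concat are omitted):

import torch
import torch.nn as nn

alpha, fusion_kernel, ratio = 4, 7, 2                  # cfg.SLOWFAST.ALPHA / FUSION_KERNEL_SZ / FUSION_CONV_CHANNEL_RATIO
dim_fast = 8
conv_f2s = nn.Conv3d(
    dim_fast, dim_fast * ratio,
    kernel_size=[fusion_kernel, 1, 1],
    stride=[alpha, 1, 1],
    padding=[fusion_kernel // 2, 0, 0],
    bias=False,
)
slow = torch.randn(2, 64, 8, 14, 14)                   # (N, C_slow, T, H, W)
fast = torch.randn(2, dim_fast, 8 * alpha, 14, 14)     # Fast pathway: alpha x more frames
fused = torch.cat([slow, conv_f2s(fast)], dim=1)       # time-strided conv, then channel concat
assert fused.shape == (2, 64 + dim_fast * ratio, 8, 14, 14)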
+ """ + super(FuseFastToSlow, self).__init__() + self.conv_f2s = nn.Conv3d( + dim_in, + dim_in * fusion_conv_channel_ratio, + kernel_size=[fusion_kernel, 1, 1], + stride=[alpha, 1, 1], + padding=[fusion_kernel // 2, 0, 0], + bias=False, + ) + self.bn = norm_module( + num_features=dim_in * fusion_conv_channel_ratio, + eps=eps, + momentum=bn_mmt, + ) + self.relu = nn.ReLU(inplace_relu) + + def forward(self, x): + x_s = x[0] + x_f = x[1] + fuse = self.conv_f2s(x_f) + fuse = self.bn(fuse) + fuse = self.relu(fuse) + x_s_fuse = torch.cat([x_s, fuse], 1) + return [x_s_fuse, x_f] + + +@MODEL_REGISTRY.register() +class SlowFast(nn.Module): + """ + SlowFast model builder for SlowFast network. + + Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. + "SlowFast networks for video recognition." + https://arxiv.org/pdf/1812.03982.pdf + """ + + def __init__(self, cfg): + """ + The `__init__` method of any subclass should also contain these + arguments. + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. + """ + super(SlowFast, self).__init__() + self.norm_module = get_norm(cfg) + self.enable_detection = cfg.DETECTION.ENABLE + self.num_pathways = 2 + self._construct_network(cfg) + init_helper.init_weights( + self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN + ) + + def _construct_network(self, cfg): + """ + Builds a SlowFast model. The first pathway is the Slow pathway and the + second pathway is the Fast pathway. + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. + """ + assert cfg.MODEL.ARCH in _POOL1.keys() + pool_size = _POOL1[cfg.MODEL.ARCH] + assert len({len(pool_size), self.num_pathways}) == 1 + assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys() + + (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH] + + num_groups = cfg.RESNET.NUM_GROUPS + width_per_group = cfg.RESNET.WIDTH_PER_GROUP + dim_inner = num_groups * width_per_group + out_dim_ratio = ( + cfg.SLOWFAST.BETA_INV // cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO + ) + + temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH] + + self.s1 = stem_helper.VideoModelStem( + dim_in=cfg.DATA.INPUT_CHANNEL_NUM, + dim_out=[width_per_group, width_per_group // cfg.SLOWFAST.BETA_INV], + kernel=[temp_kernel[0][0] + [7, 7], temp_kernel[0][1] + [7, 7]], + stride=[[1, 2, 2]] * 2, + padding=[ + [temp_kernel[0][0][0] // 2, 3, 3], + [temp_kernel[0][1][0] // 2, 3, 3], + ], + norm_module=self.norm_module, + ) + self.s1_fuse = FuseFastToSlow( + width_per_group // cfg.SLOWFAST.BETA_INV, + cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO, + cfg.SLOWFAST.FUSION_KERNEL_SZ, + cfg.SLOWFAST.ALPHA, + norm_module=self.norm_module, + ) + + self.s2 = resnet_helper.ResStage( + dim_in=[ + width_per_group + width_per_group // out_dim_ratio, + width_per_group // cfg.SLOWFAST.BETA_INV, + ], + dim_out=[ + width_per_group * 4, + width_per_group * 4 // cfg.SLOWFAST.BETA_INV, + ], + dim_inner=[dim_inner, dim_inner // cfg.SLOWFAST.BETA_INV], + temp_kernel_sizes=temp_kernel[1], + stride=cfg.RESNET.SPATIAL_STRIDES[0], + num_blocks=[d2] * 2, + num_groups=[num_groups] * 2, + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0], + nonlocal_inds=cfg.NONLOCAL.LOCATION[0], + nonlocal_group=cfg.NONLOCAL.GROUP[0], + nonlocal_pool=cfg.NONLOCAL.POOL[0], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + dilation=cfg.RESNET.SPATIAL_DILATIONS[0], + norm_module=self.norm_module, + ) + self.s2_fuse = FuseFastToSlow( + width_per_group * 4 // 
cfg.SLOWFAST.BETA_INV, + cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO, + cfg.SLOWFAST.FUSION_KERNEL_SZ, + cfg.SLOWFAST.ALPHA, + norm_module=self.norm_module, + ) + + for pathway in range(self.num_pathways): + pool = nn.MaxPool3d( + kernel_size=pool_size[pathway], + stride=pool_size[pathway], + padding=[0, 0, 0], + ) + self.add_module("pathway{}_pool".format(pathway), pool) + + self.s3 = resnet_helper.ResStage( + dim_in=[ + width_per_group * 4 + width_per_group * 4 // out_dim_ratio, + width_per_group * 4 // cfg.SLOWFAST.BETA_INV, + ], + dim_out=[ + width_per_group * 8, + width_per_group * 8 // cfg.SLOWFAST.BETA_INV, + ], + dim_inner=[dim_inner * 2, dim_inner * 2 // cfg.SLOWFAST.BETA_INV], + temp_kernel_sizes=temp_kernel[2], + stride=cfg.RESNET.SPATIAL_STRIDES[1], + num_blocks=[d3] * 2, + num_groups=[num_groups] * 2, + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1], + nonlocal_inds=cfg.NONLOCAL.LOCATION[1], + nonlocal_group=cfg.NONLOCAL.GROUP[1], + nonlocal_pool=cfg.NONLOCAL.POOL[1], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + dilation=cfg.RESNET.SPATIAL_DILATIONS[1], + norm_module=self.norm_module, + ) + self.s3_fuse = FuseFastToSlow( + width_per_group * 8 // cfg.SLOWFAST.BETA_INV, + cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO, + cfg.SLOWFAST.FUSION_KERNEL_SZ, + cfg.SLOWFAST.ALPHA, + norm_module=self.norm_module, + ) + + self.s4 = resnet_helper.ResStage( + dim_in=[ + width_per_group * 8 + width_per_group * 8 // out_dim_ratio, + width_per_group * 8 // cfg.SLOWFAST.BETA_INV, + ], + dim_out=[ + width_per_group * 16, + width_per_group * 16 // cfg.SLOWFAST.BETA_INV, + ], + dim_inner=[dim_inner * 4, dim_inner * 4 // cfg.SLOWFAST.BETA_INV], + temp_kernel_sizes=temp_kernel[3], + stride=cfg.RESNET.SPATIAL_STRIDES[2], + num_blocks=[d4] * 2, + num_groups=[num_groups] * 2, + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2], + nonlocal_inds=cfg.NONLOCAL.LOCATION[2], + nonlocal_group=cfg.NONLOCAL.GROUP[2], + nonlocal_pool=cfg.NONLOCAL.POOL[2], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + dilation=cfg.RESNET.SPATIAL_DILATIONS[2], + norm_module=self.norm_module, + ) + self.s4_fuse = FuseFastToSlow( + width_per_group * 16 // cfg.SLOWFAST.BETA_INV, + cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO, + cfg.SLOWFAST.FUSION_KERNEL_SZ, + cfg.SLOWFAST.ALPHA, + norm_module=self.norm_module, + ) + + self.s5 = resnet_helper.ResStage( + dim_in=[ + width_per_group * 16 + width_per_group * 16 // out_dim_ratio, + width_per_group * 16 // cfg.SLOWFAST.BETA_INV, + ], + dim_out=[ + width_per_group * 32, + width_per_group * 32 // cfg.SLOWFAST.BETA_INV, + ], + dim_inner=[dim_inner * 8, dim_inner * 8 // cfg.SLOWFAST.BETA_INV], + temp_kernel_sizes=temp_kernel[4], + stride=cfg.RESNET.SPATIAL_STRIDES[3], + num_blocks=[d5] * 2, + num_groups=[num_groups] * 2, + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3], + nonlocal_inds=cfg.NONLOCAL.LOCATION[3], + nonlocal_group=cfg.NONLOCAL.GROUP[3], + nonlocal_pool=cfg.NONLOCAL.POOL[3], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + dilation=cfg.RESNET.SPATIAL_DILATIONS[3], + norm_module=self.norm_module, + ) + + if cfg.DETECTION.ENABLE: + raise NotImplementedError + else: + self.head = head_helper.ResNetBasicHead( + dim_in=[ + width_per_group * 32, + width_per_group * 32 // cfg.SLOWFAST.BETA_INV, + ], + num_classes=cfg.MODEL.NUM_CLASSES, + pool_size=[None, None] + if cfg.MULTIGRID.SHORT_CYCLE + else [ + [ + cfg.DATA.NUM_FRAMES + // cfg.SLOWFAST.ALPHA + // 
pool_size[0][0], + cfg.DATA.CROP_SIZE // 32 // pool_size[0][1], + cfg.DATA.CROP_SIZE // 32 // pool_size[0][2], + ], + [ + cfg.DATA.NUM_FRAMES // pool_size[1][0], + cfg.DATA.CROP_SIZE // 32 // pool_size[1][1], + cfg.DATA.CROP_SIZE // 32 // pool_size[1][2], + ], + ], # None for AdaptiveAvgPool3d((1, 1, 1)) + dropout_rate=cfg.MODEL.DROPOUT_RATE, + act_func=cfg.MODEL.HEAD_ACT, + ) + + def forward(self, x, bboxes=None): + x = self.s1(x) + x = self.s1_fuse(x) + x = self.s2(x) + x = self.s2_fuse(x) + for pathway in range(self.num_pathways): + pool = getattr(self, "pathway{}_pool".format(pathway)) + x[pathway] = pool(x[pathway]) + x = self.s3(x) + x = self.s3_fuse(x) + x = self.s4(x) + x = self.s4_fuse(x) + x = self.s5(x) + if self.enable_detection: + x = self.head(x, bboxes) + else: + x = self.head(x) + return x + + +@MODEL_REGISTRY.register() +class ResNet(nn.Module): + """ + ResNet model builder. It builds a ResNet like network backbone without + lateral connection (C2D, I3D, Slow). + + Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. + "SlowFast networks for video recognition." + https://arxiv.org/pdf/1812.03982.pdf + + Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He. + "Non-local neural networks." + https://arxiv.org/pdf/1711.07971.pdf + """ + + def __init__(self, cfg): + """ + The `__init__` method of any subclass should also contain these + arguments. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. + """ + super(ResNet, self).__init__() + self.norm_module = get_norm(cfg) + self.enable_detection = cfg.DETECTION.ENABLE + self.num_pathways = 1 + self._construct_network(cfg) + init_helper.init_weights( + self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN + ) + + def _construct_network(self, cfg): + """ + Builds a single pathway ResNet model. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. 
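+        A sketch of how the stage plan falls out of the config (depth 50 and
+        width 64 are assumptions chosen for illustration)::
+
+            d2, d3, d4, d5 = _MODEL_STAGE_DEPTH[50]      # (3, 4, 6, 3) blocks
+            width_per_group = 64
+            stage_widths = [width_per_group * m for m in (4, 8, 16, 32)]
+            # -> [256, 512, 1024, 2048] output channels for s2..s5
+            temp_kernel = _TEMPORAL_KERNEL_BASIS["slow"]
+            # -> temporal kernel 1 for conv1/res2/res3 and 3 for res4/res5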
+ """ + assert cfg.MODEL.ARCH in _POOL1.keys() + pool_size = _POOL1[cfg.MODEL.ARCH] + assert len({len(pool_size), self.num_pathways}) == 1 + assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys() + + (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH] + + num_groups = cfg.RESNET.NUM_GROUPS + width_per_group = cfg.RESNET.WIDTH_PER_GROUP + dim_inner = num_groups * width_per_group + print(dim_inner) + + temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH] + + self.s1 = stem_helper.VideoModelStem( + dim_in=cfg.DATA.INPUT_CHANNEL_NUM, + dim_out=[width_per_group], + kernel=[temp_kernel[0][0] + [7, 7]], + stride=[[1, 2, 2]], + padding=[[temp_kernel[0][0][0] // 2, 3, 3]], + norm_module=self.norm_module, + ) + + self.s2 = resnet_helper.ResStage( + dim_in=[width_per_group], + dim_out=[width_per_group * 4], + dim_inner=[dim_inner], + temp_kernel_sizes=temp_kernel[1], + stride=cfg.RESNET.SPATIAL_STRIDES[0], + num_blocks=[d2], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0], + nonlocal_inds=cfg.NONLOCAL.LOCATION[0], + nonlocal_group=cfg.NONLOCAL.GROUP[0], + nonlocal_pool=cfg.NONLOCAL.POOL[0], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[0], + norm_module=self.norm_module, + ) + + for pathway in range(self.num_pathways): + pool = nn.MaxPool3d( + kernel_size=pool_size[pathway], + stride=pool_size[pathway], + padding=[0, 0, 0], + ) + self.add_module("pathway{}_pool".format(pathway), pool) + + self.s3 = resnet_helper.ResStage( + dim_in=[width_per_group * 4], + dim_out=[width_per_group * 8], + dim_inner=[dim_inner * 2], + temp_kernel_sizes=temp_kernel[2], + stride=cfg.RESNET.SPATIAL_STRIDES[1], + num_blocks=[d3], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1], + nonlocal_inds=cfg.NONLOCAL.LOCATION[1], + nonlocal_group=cfg.NONLOCAL.GROUP[1], + nonlocal_pool=cfg.NONLOCAL.POOL[1], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[1], + norm_module=self.norm_module, + ) + + self.s4 = resnet_helper.ResStage( + dim_in=[width_per_group * 8], + dim_out=[width_per_group * 16], + dim_inner=[dim_inner * 4], + temp_kernel_sizes=temp_kernel[3], + stride=cfg.RESNET.SPATIAL_STRIDES[2], + num_blocks=[d4], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2], + nonlocal_inds=cfg.NONLOCAL.LOCATION[2], + nonlocal_group=cfg.NONLOCAL.GROUP[2], + nonlocal_pool=cfg.NONLOCAL.POOL[2], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[2], + norm_module=self.norm_module, + ) + + self.s5 = resnet_helper.ResStage( + dim_in=[width_per_group * 16], + dim_out=[width_per_group * 32], + dim_inner=[dim_inner * 8], + temp_kernel_sizes=temp_kernel[4], + stride=cfg.RESNET.SPATIAL_STRIDES[3], + num_blocks=[d5], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3], + nonlocal_inds=cfg.NONLOCAL.LOCATION[3], + nonlocal_group=cfg.NONLOCAL.GROUP[3], + nonlocal_pool=cfg.NONLOCAL.POOL[3], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + 
dilation=cfg.RESNET.SPATIAL_DILATIONS[3], + norm_module=self.norm_module, + ) + + if self.enable_detection: + raise NotImplementedError + else: + self.head = head_helper.ResNetBasicHead( + dim_in=[width_per_group * 32], + num_classes=cfg.MODEL.NUM_CLASSES, + pool_size=[None, None] + if cfg.MULTIGRID.SHORT_CYCLE + else [ + [ + cfg.DATA.NUM_FRAMES // pool_size[0][0], + cfg.DATA.CROP_SIZE // 32 // pool_size[0][1], + cfg.DATA.CROP_SIZE // 32 // pool_size[0][2], + ] + ], # None for AdaptiveAvgPool3d((1, 1, 1)) + dropout_rate=cfg.MODEL.DROPOUT_RATE, + act_func=cfg.MODEL.HEAD_ACT, + ) + + def forward(self, x, return_feat=False, bboxes=None): + x = self.s1(x) + x = self.s2(x) + for pathway in range(self.num_pathways): + pool = getattr(self, "pathway{}_pool".format(pathway)) + x[pathway] = pool(x[pathway]) + x = self.s3(x) + x = self.s4(x) + feat = self.s5(x) + if return_feat: + return feat + if self.enable_detection: + x = self.head(feat, bboxes) + else: + x = self.head(feat) + return x + +@MODEL_REGISTRY.register() +class ResNetVar(nn.Module): + """ + ResNet model builder. It builds a ResNet like network backbone without + lateral connection (C2D, I3D, Slow). + + Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. + "SlowFast networks for video recognition." + https://arxiv.org/pdf/1812.03982.pdf + + Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He. + "Non-local neural networks." + https://arxiv.org/pdf/1711.07971.pdf + """ + + def __init__(self, cfg): + """ + The `__init__` method of any subclass should also contain these + arguments. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. + """ + super(ResNetVar, self).__init__() + self.norm_module = get_norm(cfg) + self.enable_detection = cfg.DETECTION.ENABLE + self.num_pathways = 1 + self._construct_network(cfg) + init_helper.init_weights( + self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN + ) + + def _construct_network(self, cfg): + """ + Builds a single pathway ResNet model. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. 
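+        How the fixed head pool built above is sized, with illustrative
+        numbers (NUM_FRAMES=8 and CROP_SIZE=224 are assumptions;
+        _POOL1["slow"] is [[1, 1, 1]])::
+
+            pool_t = 8 // 1            # NUM_FRAMES // pool_size[0][0]
+            pool_hw = 224 // 32 // 1   # the backbone's total spatial stride is 32
+            # i.e. a fixed [8, 7, 7] pooling window; passing None instead makes
+            # the head fall back to AdaptiveAvgPool3d((1, 1, 1)), which is what
+            # this class does with pool_size=[None].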
+ """ + assert cfg.MODEL.ARCH in _POOL1.keys() + pool_size = _POOL1[cfg.MODEL.ARCH] + assert len({len(pool_size), self.num_pathways}) == 1 + assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys() + + (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH] + + num_groups = cfg.RESNET.NUM_GROUPS + width_per_group = cfg.RESNET.WIDTH_PER_GROUP + dim_inner = num_groups * width_per_group + + temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH] + + self.s1 = stem_helper.VideoModelStem( + dim_in=cfg.DATA.INPUT_CHANNEL_NUM, + dim_out=[width_per_group], + kernel=[temp_kernel[0][0] + [7, 7]], + stride=[[1, 2, 2]], + padding=[[temp_kernel[0][0][0] // 2, 3, 3]], + norm_module=self.norm_module, + ) + + self.s2 = resnet_helper.ResStage( + dim_in=[width_per_group], + dim_out=[width_per_group * 4], + dim_inner=[dim_inner], + temp_kernel_sizes=temp_kernel[1], + stride=cfg.RESNET.SPATIAL_STRIDES[0], + num_blocks=[d2], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0], + nonlocal_inds=cfg.NONLOCAL.LOCATION[0], + nonlocal_group=cfg.NONLOCAL.GROUP[0], + nonlocal_pool=cfg.NONLOCAL.POOL[0], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[0], + norm_module=self.norm_module, + ) + + for pathway in range(self.num_pathways): + pool = nn.MaxPool3d( + kernel_size=pool_size[pathway], + stride=pool_size[pathway], + padding=[0, 0, 0], + ) + self.add_module("pathway{}_pool".format(pathway), pool) + + self.s3 = resnet_helper.ResStage( + dim_in=[width_per_group * 4], + dim_out=[width_per_group * 8], + dim_inner=[dim_inner * 2], + temp_kernel_sizes=temp_kernel[2], + stride=cfg.RESNET.SPATIAL_STRIDES[1], + num_blocks=[d3], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1], + nonlocal_inds=cfg.NONLOCAL.LOCATION[1], + nonlocal_group=cfg.NONLOCAL.GROUP[1], + nonlocal_pool=cfg.NONLOCAL.POOL[1], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[1], + norm_module=self.norm_module, + ) + + self.s4 = resnet_helper.ResStage( + dim_in=[width_per_group * 8], + dim_out=[width_per_group * 16], + dim_inner=[dim_inner * 4], + temp_kernel_sizes=temp_kernel[3], + stride=cfg.RESNET.SPATIAL_STRIDES[2], + num_blocks=[d4], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2], + nonlocal_inds=cfg.NONLOCAL.LOCATION[2], + nonlocal_group=cfg.NONLOCAL.GROUP[2], + nonlocal_pool=cfg.NONLOCAL.POOL[2], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[2], + norm_module=self.norm_module, + ) + + self.s5 = resnet_helper.ResStage( + dim_in=[width_per_group * 16], + dim_out=[width_per_group * 32], + dim_inner=[dim_inner * 8], + temp_kernel_sizes=temp_kernel[4], + stride=cfg.RESNET.SPATIAL_STRIDES[3], + num_blocks=[d5], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3], + nonlocal_inds=cfg.NONLOCAL.LOCATION[3], + nonlocal_group=cfg.NONLOCAL.GROUP[3], + nonlocal_pool=cfg.NONLOCAL.POOL[3], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + 
dilation=cfg.RESNET.SPATIAL_DILATIONS[3], + norm_module=self.norm_module, + ) + + if self.enable_detection: + raise NotImplementedError + else: + self.head = head_helper.ResNetBasicHead( + dim_in=[width_per_group * 32], + num_classes=cfg.MODEL.NUM_CLASSES, + pool_size=[None], + dropout_rate=cfg.MODEL.DROPOUT_RATE, + act_func=cfg.MODEL.HEAD_ACT, + ) + + def forward(self, x, bboxes=None): + x = self.s1(x) + x = self.s2(x) + for pathway in range(self.num_pathways): + pool = getattr(self, "pathway{}_pool".format(pathway)) + x[pathway] = pool(x[pathway]) + x = self.s3(x) + x = self.s4(x) + x = self.s5(x) + if self.enable_detection: + x = self.head(x, bboxes) + else: + x = self.head(x) + return x + +@MODEL_REGISTRY.register() +class ResNetBase(nn.Module): + """ + ResNet model builder. It builds a ResNet like network backbone without + lateral connection (C2D, I3D, Slow). + + Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. + "SlowFast networks for video recognition." + https://arxiv.org/pdf/1812.03982.pdf + + Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He. + "Non-local neural networks." + https://arxiv.org/pdf/1711.07971.pdf + """ + + def __init__(self, cfg): + """ + The `__init__` method of any subclass should also contain these + arguments. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. + """ + super(ResNetBase, self).__init__() + self.norm_module = get_norm(cfg) + self.enable_detection = cfg.DETECTION.ENABLE + self.num_pathways = 1 + self._construct_network(cfg) + init_helper.init_weights( + self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN + ) + + def _construct_network(self, cfg): + """ + Builds a single pathway ResNet model. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. 
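+        A hypothetical sketch of how one of the builders registered in this
+        file is looked up and called (cfg is assumed to be a matching CfgNode;
+        the real construction helper lives elsewhere in the codebase)::
+
+            import torch
+
+            model = MODEL_REGISTRY.get("ResNetVar")(cfg)
+            # the stem takes one tensor per pathway, so the input is a list
+            preds = model([torch.randn(2, 3, 8, 224, 224)])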
+ """ + assert cfg.MODEL.ARCH in _POOL1.keys() + pool_size = _POOL1[cfg.MODEL.ARCH] + assert len({len(pool_size), self.num_pathways}) == 1 + assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys() + + (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH] + + num_groups = cfg.RESNET.NUM_GROUPS + width_per_group = cfg.RESNET.WIDTH_PER_GROUP + dim_inner = num_groups * width_per_group + + temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH] + + self.s1 = stem_helper.VideoModelStem( + dim_in=cfg.DATA.INPUT_CHANNEL_NUM, + dim_out=[width_per_group], + kernel=[temp_kernel[0][0] + [7, 7]], + stride=[[1, 2, 2]], + padding=[[temp_kernel[0][0][0] // 2, 3, 3]], + norm_module=self.norm_module, + ) + + self.s2 = resnet_helper.ResStage( + dim_in=[width_per_group], + dim_out=[width_per_group * 4], + dim_inner=[dim_inner], + temp_kernel_sizes=temp_kernel[1], + stride=cfg.RESNET.SPATIAL_STRIDES[0], + num_blocks=[d2], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0], + nonlocal_inds=cfg.NONLOCAL.LOCATION[0], + nonlocal_group=cfg.NONLOCAL.GROUP[0], + nonlocal_pool=cfg.NONLOCAL.POOL[0], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[0], + norm_module=self.norm_module, + ) + + for pathway in range(self.num_pathways): + pool = nn.MaxPool3d( + kernel_size=pool_size[pathway], + stride=pool_size[pathway], + padding=[0, 0, 0], + ) + self.add_module("pathway{}_pool".format(pathway), pool) + + self.s3 = resnet_helper.ResStage( + dim_in=[width_per_group * 4], + dim_out=[width_per_group * 8], + dim_inner=[dim_inner * 2], + temp_kernel_sizes=temp_kernel[2], + stride=cfg.RESNET.SPATIAL_STRIDES[1], + num_blocks=[d3], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1], + nonlocal_inds=cfg.NONLOCAL.LOCATION[1], + nonlocal_group=cfg.NONLOCAL.GROUP[1], + nonlocal_pool=cfg.NONLOCAL.POOL[1], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[1], + norm_module=self.norm_module, + ) + + self.s4 = resnet_helper.ResStage( + dim_in=[width_per_group * 8], + dim_out=[width_per_group * 16], + dim_inner=[dim_inner * 4], + temp_kernel_sizes=temp_kernel[3], + stride=cfg.RESNET.SPATIAL_STRIDES[2], + num_blocks=[d4], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2], + nonlocal_inds=cfg.NONLOCAL.LOCATION[2], + nonlocal_group=cfg.NONLOCAL.GROUP[2], + nonlocal_pool=cfg.NONLOCAL.POOL[2], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[2], + norm_module=self.norm_module, + ) + + self.s5 = resnet_helper.ResStage( + dim_in=[width_per_group * 16], + dim_out=[width_per_group * 32], + dim_inner=[dim_inner * 8], + temp_kernel_sizes=temp_kernel[4], + stride=cfg.RESNET.SPATIAL_STRIDES[3], + num_blocks=[d5], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3], + nonlocal_inds=cfg.NONLOCAL.LOCATION[3], + nonlocal_group=cfg.NONLOCAL.GROUP[3], + nonlocal_pool=cfg.NONLOCAL.POOL[3], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + 
dilation=cfg.RESNET.SPATIAL_DILATIONS[3], + norm_module=self.norm_module, + ) + + if self.enable_detection: + raise NotImplementedError + else: + self.head = head_helper.ResNetBasicHead( + dim_in=[width_per_group * 32], + num_classes=cfg.MODEL.NUM_CLASSES, + pool_size=[None, None] + if cfg.MULTIGRID.SHORT_CYCLE + else [ + None + ], # None for AdaptiveAvgPool3d((1, 1, 1)) + dropout_rate=cfg.MODEL.DROPOUT_RATE, + act_func=cfg.MODEL.HEAD_ACT, + ) + + def forward(self, x, bboxes=None): + x = self.s1(x) + x = self.s2(x) + for pathway in range(self.num_pathways): + pool = getattr(self, "pathway{}_pool".format(pathway)) + x[pathway] = pool(x[pathway]) + x = self.s3(x) + x = self.s4(x) + x = self.s5(x) + if self.enable_detection: + x = self.head(x, bboxes) + else: + x = self.head(x) + return x + + +@MODEL_REGISTRY.register() +class ResNetFreeze(nn.Module): + """ + ResNet model builder. It builds a ResNet like network backbone without + lateral connection (C2D, I3D, Slow). + + Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. + "SlowFast networks for video recognition." + https://arxiv.org/pdf/1812.03982.pdf + + Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He. + "Non-local neural networks." + https://arxiv.org/pdf/1711.07971.pdf + """ + + def __init__(self, cfg): + """ + The `__init__` method of any subclass should also contain these + arguments. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. + """ + super(ResNetFreeze, self).__init__() + self.norm_module = get_norm(cfg) + self.enable_detection = cfg.DETECTION.ENABLE + self.num_pathways = 1 + self._construct_network(cfg) + init_helper.init_weights( + self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN + ) + + def _construct_network(self, cfg): + """ + Builds a single pathway ResNet model. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. 
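+        The "pathway{}_pool" modules built in these constructors are attached
+        with add_module and fetched back by name with getattr; the same
+        pattern in isolation (toy sizes, single pathway)::
+
+            import torch
+            import torch.nn as nn
+
+            class Toy(nn.Module):
+                def __init__(self):
+                    super().__init__()
+                    self.add_module(
+                        "pathway0_pool",
+                        nn.MaxPool3d(kernel_size=[2, 1, 1], stride=[2, 1, 1]),
+                    )
+
+                def forward(self, x):
+                    x[0] = getattr(self, "pathway0_pool")(x[0])
+                    return x
+
+            out = Toy()([torch.randn(1, 4, 8, 14, 14)])
+            # temporal dim halved: out[0] is [1, 4, 4, 14, 14]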
+ """ + assert cfg.MODEL.ARCH in _POOL1.keys() + pool_size = _POOL1[cfg.MODEL.ARCH] + assert len({len(pool_size), self.num_pathways}) == 1 + assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys() + + (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH] + + num_groups = cfg.RESNET.NUM_GROUPS + width_per_group = cfg.RESNET.WIDTH_PER_GROUP + dim_inner = num_groups * width_per_group + + temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH] + + self.s1 = stem_helper.VideoModelStem( + dim_in=cfg.DATA.INPUT_CHANNEL_NUM, + dim_out=[width_per_group], + kernel=[temp_kernel[0][0] + [7, 7]], + stride=[[1, 2, 2]], + padding=[[temp_kernel[0][0][0] // 2, 3, 3]], + norm_module=self.norm_module, + ) + + self.s2 = resnet_helper.ResStage( + dim_in=[width_per_group], + dim_out=[width_per_group * 4], + dim_inner=[dim_inner], + temp_kernel_sizes=temp_kernel[1], + stride=cfg.RESNET.SPATIAL_STRIDES[0], + num_blocks=[d2], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0], + nonlocal_inds=cfg.NONLOCAL.LOCATION[0], + nonlocal_group=cfg.NONLOCAL.GROUP[0], + nonlocal_pool=cfg.NONLOCAL.POOL[0], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[0], + norm_module=self.norm_module, + ) + + for pathway in range(self.num_pathways): + pool = nn.MaxPool3d( + kernel_size=pool_size[pathway], + stride=pool_size[pathway], + padding=[0, 0, 0], + ) + self.add_module("pathway{}_pool".format(pathway), pool) + + self.s3 = resnet_helper.ResStage( + dim_in=[width_per_group * 4], + dim_out=[width_per_group * 8], + dim_inner=[dim_inner * 2], + temp_kernel_sizes=temp_kernel[2], + stride=cfg.RESNET.SPATIAL_STRIDES[1], + num_blocks=[d3], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1], + nonlocal_inds=cfg.NONLOCAL.LOCATION[1], + nonlocal_group=cfg.NONLOCAL.GROUP[1], + nonlocal_pool=cfg.NONLOCAL.POOL[1], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[1], + norm_module=self.norm_module, + ) + + self.s4 = resnet_helper.ResStage( + dim_in=[width_per_group * 8], + dim_out=[width_per_group * 16], + dim_inner=[dim_inner * 4], + temp_kernel_sizes=temp_kernel[3], + stride=cfg.RESNET.SPATIAL_STRIDES[2], + num_blocks=[d4], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2], + nonlocal_inds=cfg.NONLOCAL.LOCATION[2], + nonlocal_group=cfg.NONLOCAL.GROUP[2], + nonlocal_pool=cfg.NONLOCAL.POOL[2], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[2], + norm_module=self.norm_module, + ) + + self.s5 = resnet_helper.ResStage( + dim_in=[width_per_group * 16], + dim_out=[width_per_group * 32], + dim_inner=[dim_inner * 8], + temp_kernel_sizes=temp_kernel[4], + stride=cfg.RESNET.SPATIAL_STRIDES[3], + num_blocks=[d5], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3], + nonlocal_inds=cfg.NONLOCAL.LOCATION[3], + nonlocal_group=cfg.NONLOCAL.GROUP[3], + nonlocal_pool=cfg.NONLOCAL.POOL[3], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + 
dilation=cfg.RESNET.SPATIAL_DILATIONS[3], + norm_module=self.norm_module, + ) + + if self.enable_detection: + raise NotImplementedError + else: + self.head = head_helper.ResNetBasicHead( + dim_in=[width_per_group * 32], + num_classes=cfg.MODEL.NUM_CLASSES, + pool_size=[None,None] + if cfg.MULTIGRID.SHORT_CYCLE + else [ + None + ], # None for AdaptiveAvgPool3d((1, 1, 1)) + dropout_rate=cfg.MODEL.DROPOUT_RATE, + act_func=cfg.MODEL.HEAD_ACT, + ) + + def forward(self, x, freeze_backbone=False): + assert isinstance(freeze_backbone,bool) + x = self.s1(x) + x = self.s2(x) + # for pathway in range(self.num_pathways): + # pool = getattr(self, "pathway{}_pool".format(pathway)) + # x[pathway] = pool(x[pathway]) + x = self.s3(x) + x = self.s4(x) + x = self.s5(x) + if freeze_backbone: + x=[item.detach() for item in x] + + x = self.head(x) + return x + + + +import torch.nn.functional as F +from .unet_helper import DecoderBlock,LightDecoderBlock,ResDecoderBlock + + +@MODEL_REGISTRY.register() +class ResUNet(nn.Module): + """ + ResNet model builder. It builds a ResNet like network backbone without + lateral connection (C2D, I3D, Slow). + + Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. + "SlowFast networks for video recognition." + https://arxiv.org/pdf/1812.03982.pdf + + Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He. + "Non-local neural networks." + https://arxiv.org/pdf/1711.07971.pdf + """ + + def __init__(self, cfg): + """ + The `__init__` method of any subclass should also contain these + arguments. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. + """ + super(ResUNet, self).__init__() + self.norm_module = get_norm(cfg) + self.enable_detection = cfg.DETECTION.ENABLE + self.enable_jitter = cfg.JITTER.ENABLE + self.num_pathways = 1 + assert cfg.DATA.TRAIN_CROP_SIZE == cfg.DATA.TEST_CROP_SIZE + self.image_size = cfg.DATA.TRAIN_CROP_SIZE + self.clip_size = cfg.DATA.NUM_FRAMES + self._construct_network(cfg) + init_helper.init_weights( + self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN + ) + + def _construct_network(self, cfg): + """ + Builds a single pathway ResNet model. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. 
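+        What the freeze_backbone flag used by the freeze-enabled builders in
+        this file amounts to: the backbone features are detached, so only the
+        layers applied afterwards receive gradients. A stand-alone sketch with
+        toy modules (not the classes themselves)::
+
+            import torch
+            import torch.nn as nn
+
+            backbone = nn.Conv3d(3, 16, kernel_size=1)
+            head = nn.Conv3d(16, 1, kernel_size=1)
+            feat = backbone(torch.randn(2, 3, 4, 7, 7))
+            head(feat.detach()).mean().backward()
+            # head.weight.grad is populated; backbone.weight.grad stays None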
+ """ + assert cfg.MODEL.ARCH in _POOL1.keys() + pool_size = _POOL1[cfg.MODEL.ARCH] + self.cfg = cfg + assert len({len(pool_size), self.num_pathways}) == 1 + assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys() + + (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH] + + num_groups = cfg.RESNET.NUM_GROUPS + width_per_group = cfg.RESNET.WIDTH_PER_GROUP + dim_inner = num_groups * width_per_group + + temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH] + + self.s1 = stem_helper.VideoModelStem( + dim_in=cfg.DATA.INPUT_CHANNEL_NUM, + dim_out=[width_per_group], + kernel=[temp_kernel[0][0] + [7, 7]], + stride=[[1, 2, 2]], + padding=[[temp_kernel[0][0][0] // 2, 3, 3]], + norm_module=self.norm_module, + ) + + self.s2 = resnet_helper.ResStage( + dim_in=[width_per_group], + dim_out=[width_per_group * 4], + dim_inner=[dim_inner], + temp_kernel_sizes=temp_kernel[1], + stride=cfg.RESNET.SPATIAL_STRIDES[0], + num_blocks=[d2], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0], + nonlocal_inds=cfg.NONLOCAL.LOCATION[0], + nonlocal_group=cfg.NONLOCAL.GROUP[0], + nonlocal_pool=cfg.NONLOCAL.POOL[0], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[0], + norm_module=self.norm_module, + ) + + for pathway in range(self.num_pathways): + pool = nn.MaxPool3d( + kernel_size=pool_size[pathway], + stride=pool_size[pathway], + padding=[0, 0, 0], + ) + self.add_module("pathway{}_pool".format(pathway), pool) + + self.s3 = resnet_helper.ResStage( + dim_in=[width_per_group * 4], + dim_out=[width_per_group * 8], + dim_inner=[dim_inner * 2], + temp_kernel_sizes=temp_kernel[2], + stride=cfg.RESNET.SPATIAL_STRIDES[1], + num_blocks=[d3], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1], + nonlocal_inds=cfg.NONLOCAL.LOCATION[1], + nonlocal_group=cfg.NONLOCAL.GROUP[1], + nonlocal_pool=cfg.NONLOCAL.POOL[1], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[1], + norm_module=self.norm_module, + ) + + self.s4 = resnet_helper.ResStage( + dim_in=[width_per_group * 8], + dim_out=[width_per_group * 16], + dim_inner=[dim_inner * 4], + temp_kernel_sizes=temp_kernel[3], + stride=cfg.RESNET.SPATIAL_STRIDES[2], + num_blocks=[d4], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2], + nonlocal_inds=cfg.NONLOCAL.LOCATION[2], + nonlocal_group=cfg.NONLOCAL.GROUP[2], + nonlocal_pool=cfg.NONLOCAL.POOL[2], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[2], + norm_module=self.norm_module, + ) + + # self.s5 = resnet_helper.ResStage( + # dim_in=[width_per_group * 16], + # dim_out=[width_per_group * 32], + # dim_inner=[dim_inner * 8], + # temp_kernel_sizes=temp_kernel[4], + # stride=cfg.RESNET.SPATIAL_STRIDES[3], + # num_blocks=[d5], + # num_groups=[num_groups], + # num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3], + # nonlocal_inds=cfg.NONLOCAL.LOCATION[3], + # nonlocal_group=cfg.NONLOCAL.GROUP[3], + # nonlocal_pool=cfg.NONLOCAL.POOL[3], + # instantiation=cfg.NONLOCAL.INSTANTIATION, + # trans_func_name=cfg.RESNET.TRANS_FUNC, + # stride_1x1=cfg.RESNET.STRIDE_1X1, + # 
inplace_relu=cfg.RESNET.INPLACE_RELU, + # dilation=cfg.RESNET.SPATIAL_DILATIONS[3], + # norm_module=self.norm_module, + # ) + self.labels=["rotate","light"] + self.dual_define("t4",self.labels,DecoderBlock(width_per_group * 16,width_per_group * 8,width_per_group * 8)) + self.dual_define("t3",self.labels,DecoderBlock(width_per_group * 8,width_per_group * 4, 256)) + self.dual_define("conv1x1",self.labels,nn.Sequential( + nn.Conv3d(width_per_group*4+width_per_group, 1, kernel_size=(1, 1, 1), stride=1, padding=0), nn.Sigmoid() + )) + + self.linear = nn.Sequential(nn.Linear(1, 1), nn.Sigmoid()) + + def forward_plus(self, x, y, net): + return [net(x)[0] + y[0]] + + + def dual_define(self,name,labels,net): + for label in labels: + self.add_module(f"{name}_{label}",copy.deepcopy(net)) + + + + def upsample(self, x, dims=["space"]): + ori_size = x[0].shape[2:5] + t, h, w = ori_size + if "space" in dims: + h = 2 * h + w = 2 * w + if "time" in dims: + t = 2 * t + size = (t, h, w) + return [F.interpolate(x[0], size)] + + def concat(self,x,y): + return [torch.cat([x[0],y[0]],1)] + + + + # @torchsnooper.snoop() + def forward(self, x, bboxes=None): + x1 = self.s1(x) # 1,64,8,56,56 + x2 = self.s2(x1) # 1,256,8,56,56 + x3 = self.s3(x2) # 1,512,8,28, 28 + x = self.s4(x3) # 1,1024,8,14,14 + x = self.upsample(x) # 1,1024, 8, 28, 28 + x = self.concat(x3,x)# 1,1024+512, 8, 28, 28 + x=[self.forward_branch(x,x1,x2,label) for label in self.labels] + x=torch.cat(x,1) + out = x.mean([3, 4]).view(-1, 1)*100 + out = self.linear(out) + out = out.view(x.size(0), -1) + return x,out + + + + def forward_branch(self,x,x1,x2,label): + t4=getattr(self,f"t4_{label}") + x = t4(x[0])# 1,512, 8, 28, 28 + x = self.upsample([x]) # 1,512, 8, 56, 56 + x = self.concat(x2,x)# 1,256+512, 8, 56, 56 + t3= getattr(self,f"t3_{label}") + x = t3(x[0]) # 1,256, 8, 56, 56 + x = self.concat(x1,[x]) # 1,320, 8, 56, 56 + conv1x1=getattr(self,f"conv1x1_{label}") + x = conv1x1(x[0]) # 1,2,8,56,56 + return x + + + +@MODEL_REGISTRY.register() +class ResUNetLight(nn.Module): + """ + ResNet model builder. It builds a ResNet like network backbone without + lateral connection (C2D, I3D, Slow). + + Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. + "SlowFast networks for video recognition." + https://arxiv.org/pdf/1812.03982.pdf + + Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He. + "Non-local neural networks." + https://arxiv.org/pdf/1711.07971.pdf + """ + + def __init__(self, cfg): + """ + The `__init__` method of any subclass should also contain these + arguments. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. + """ + super(ResUNetLight, self).__init__() + self.norm_module = get_norm(cfg) + self.enable_detection = cfg.DETECTION.ENABLE + self.enable_jitter = cfg.JITTER.ENABLE + self.num_pathways = 1 + assert cfg.DATA.TRAIN_CROP_SIZE == cfg.DATA.TEST_CROP_SIZE + self.image_size = cfg.DATA.TRAIN_CROP_SIZE + self.clip_size = cfg.DATA.NUM_FRAMES + self._construct_network(cfg) + init_helper.init_weights( + self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN + ) + + def _construct_network(self, cfg): + """ + Builds a single pathway ResNet model. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. 
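+        dual_define, used by these decoder builders, simply deep-copies one
+        template module per label so every branch gets its own weights; the
+        same idea in isolation::
+
+            import copy
+            import torch.nn as nn
+
+            class Branches(nn.Module):
+                def __init__(self, labels=("rotate", "light")):
+                    super().__init__()
+                    template = nn.Conv3d(4, 1, kernel_size=(1, 1, 1))
+                    for label in labels:
+                        self.add_module(
+                            "conv1x1_{}".format(label), copy.deepcopy(template)
+                        )
+
+            b = Branches()
+            assert b.conv1x1_rotate.weight is not b.conv1x1_light.weight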
+ """ + assert cfg.MODEL.ARCH in _POOL1.keys() + pool_size = _POOL1[cfg.MODEL.ARCH] + self.cfg = cfg + assert len({len(pool_size), self.num_pathways}) == 1 + assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys() + + (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH] + + num_groups = cfg.RESNET.NUM_GROUPS + width_per_group = cfg.RESNET.WIDTH_PER_GROUP + dim_inner = num_groups * width_per_group + + temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH] + + self.s1 = stem_helper.VideoModelStem( + dim_in=cfg.DATA.INPUT_CHANNEL_NUM, + dim_out=[width_per_group], + kernel=[temp_kernel[0][0] + [7, 7]], + stride=[[1, 2, 2]], + padding=[[temp_kernel[0][0][0] // 2, 3, 3]], + norm_module=self.norm_module, + ) + + self.s2 = resnet_helper.ResStage( + dim_in=[width_per_group], + dim_out=[width_per_group * 4], + dim_inner=[dim_inner], + temp_kernel_sizes=temp_kernel[1], + stride=cfg.RESNET.SPATIAL_STRIDES[0], + num_blocks=[d2], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0], + nonlocal_inds=cfg.NONLOCAL.LOCATION[0], + nonlocal_group=cfg.NONLOCAL.GROUP[0], + nonlocal_pool=cfg.NONLOCAL.POOL[0], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[0], + norm_module=self.norm_module, + ) + + for pathway in range(self.num_pathways): + pool = nn.MaxPool3d( + kernel_size=pool_size[pathway], + stride=pool_size[pathway], + padding=[0, 0, 0], + ) + self.add_module("pathway{}_pool".format(pathway), pool) + + self.s3 = resnet_helper.ResStage( + dim_in=[width_per_group * 4], + dim_out=[width_per_group * 8], + dim_inner=[dim_inner * 2], + temp_kernel_sizes=temp_kernel[2], + stride=cfg.RESNET.SPATIAL_STRIDES[1], + num_blocks=[d3], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1], + nonlocal_inds=cfg.NONLOCAL.LOCATION[1], + nonlocal_group=cfg.NONLOCAL.GROUP[1], + nonlocal_pool=cfg.NONLOCAL.POOL[1], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[1], + norm_module=self.norm_module, + ) + + self.s4 = resnet_helper.ResStage( + dim_in=[width_per_group * 8], + dim_out=[width_per_group * 16], + dim_inner=[dim_inner * 4], + temp_kernel_sizes=temp_kernel[3], + stride=cfg.RESNET.SPATIAL_STRIDES[2], + num_blocks=[d4], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2], + nonlocal_inds=cfg.NONLOCAL.LOCATION[2], + nonlocal_group=cfg.NONLOCAL.GROUP[2], + nonlocal_pool=cfg.NONLOCAL.POOL[2], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[2], + norm_module=self.norm_module, + ) + + # self.s5 = resnet_helper.ResStage( + # dim_in=[width_per_group * 16], + # dim_out=[width_per_group * 32], + # dim_inner=[dim_inner * 8], + # temp_kernel_sizes=temp_kernel[4], + # stride=cfg.RESNET.SPATIAL_STRIDES[3], + # num_blocks=[d5], + # num_groups=[num_groups], + # num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3], + # nonlocal_inds=cfg.NONLOCAL.LOCATION[3], + # nonlocal_group=cfg.NONLOCAL.GROUP[3], + # nonlocal_pool=cfg.NONLOCAL.POOL[3], + # instantiation=cfg.NONLOCAL.INSTANTIATION, + # trans_func_name=cfg.RESNET.TRANS_FUNC, + # stride_1x1=cfg.RESNET.STRIDE_1X1, + # 
inplace_relu=cfg.RESNET.INPLACE_RELU, + # dilation=cfg.RESNET.SPATIAL_DILATIONS[3], + # norm_module=self.norm_module, + # ) + self.labels=["rotate","light"] + self.dual_define("t4",self.labels,LightDecoderBlock(width_per_group * 16,width_per_group * 8,width_per_group * 4)) + self.dual_define("t3",self.labels,LightDecoderBlock(width_per_group * 4,width_per_group * 4, 128)) + self.dual_define("conv1x1",self.labels,nn.Sequential( + nn.Conv3d(128+width_per_group, 1, kernel_size=(1, 1, 1), stride=1, padding=0), nn.Sigmoid() + )) + + self.linear = nn.Sequential(nn.Linear(1, 1), nn.Sigmoid()) + + def forward_plus(self, x, y, net): + return [net(x)[0] + y[0]] + + + def dual_define(self,name,labels,net): + for label in labels: + self.add_module(f"{name}_{label}",copy.deepcopy(net)) + + + + def upsample(self, x, dims=["space"]): + ori_size = x[0].shape[2:5] + t, h, w = ori_size + if "space" in dims: + h = 2 * h + w = 2 * w + if "time" in dims: + t = 2 * t + size = (t, h, w) + return [F.interpolate(x[0], size)] + + def concat(self,x,y): + return [torch.cat([x[0],y[0]],1)] + + def get_detach_var(self,x): + return [t.detach() for t in x] + + # @torchsnooper.snoop() + def forward(self, x, freeze_backbone=False): + x1 = self.s1(x) # 1,64,8,56,56 + x2 = self.s2(x1) # 1,256,8,56,56 + x3 = self.s3(x2) # 1,512,8,28, 28 + x = self.s4(x3) # 1,1024,8,14,14 + assert isinstance(freeze_backbone,bool) + if freeze_backbone: + x=self.get_detach_var(x) + x1=self.get_detach_var(x1) + x2=self.get_detach_var(x2) + x3=self.get_detach_var(x3) + + x = self.upsample(x) # 1,1024, 8, 28, 28 + x = self.concat(x3,x)# 1,1024+512, 8, 28, 28 + x=[self.forward_branch(x,x1,x2,label) for label in self.labels] + x=torch.cat(x,1) + out = x.mean([3, 4]).view(-1, 1)*100 # 1,2,8,56,56 + out = self.linear(out) + out = out.view(x.size(0), -1) + return x,out + + + + def forward_branch(self,x,x1,x2,label): + t4=getattr(self,f"t4_{label}") + x = t4(x[0])# 1,256, 8, 28, 28 + x = self.upsample([x]) # 1,256, 8, 56, 56 + x = self.concat(x2,x)# 1,256+256, 8, 56, 56 + t3= getattr(self,f"t3_{label}") + x = t3(x[0]) # 1,128, 8, 56, 56 + x = self.concat(x1,[x]) # 1,192, 8, 56, 56 + conv1x1=getattr(self,f"conv1x1_{label}") + x = conv1x1(x[0]) # 1,2,8,56,56 + return x + + + +@MODEL_REGISTRY.register() +class ResUNetLightFix(nn.Module): + """ + ResNet model builder. It builds a ResNet like network backbone without + lateral connection (C2D, I3D, Slow). + + Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. + "SlowFast networks for video recognition." + https://arxiv.org/pdf/1812.03982.pdf + + Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He. + "Non-local neural networks." + https://arxiv.org/pdf/1711.07971.pdf + """ + + def __init__(self, cfg): + """ + The `__init__` method of any subclass should also contain these + arguments. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. + """ + super(ResUNetLightFix, self).__init__() + self.norm_module = get_norm(cfg) + self.enable_detection = cfg.DETECTION.ENABLE + self.enable_jitter = cfg.JITTER.ENABLE + self.num_pathways = 1 + assert cfg.DATA.TRAIN_CROP_SIZE == cfg.DATA.TEST_CROP_SIZE + self.image_size = cfg.DATA.TRAIN_CROP_SIZE + self.clip_size = cfg.DATA.NUM_FRAMES + self._construct_network(cfg) + init_helper.init_weights( + self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN + ) + + def _construct_network(self, cfg): + """ + Builds a single pathway ResNet model. 
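+        The upsample helper shared by these decoders is a plain F.interpolate
+        (nearest-neighbour by default) that doubles only the requested
+        dimensions; in isolation, with a toy shape::
+
+            import torch
+            import torch.nn.functional as F
+
+            x = torch.randn(1, 8, 4, 14, 14)            # [B, C, T, H, W]
+            t, h, w = x.shape[2:5]
+            y = F.interpolate(x, size=(t, 2 * h, 2 * w))
+            # y: [1, 8, 4, 28, 28]; the helper also doubles t when "time" is
+            # passed in dims.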
+ + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. + """ + assert cfg.MODEL.ARCH in _POOL1.keys() + pool_size = _POOL1[cfg.MODEL.ARCH] + self.cfg = cfg + assert len({len(pool_size), self.num_pathways}) == 1 + assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys() + + (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH] + + num_groups = cfg.RESNET.NUM_GROUPS + width_per_group = cfg.RESNET.WIDTH_PER_GROUP + dim_inner = num_groups * width_per_group + + temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH] + + self.s1 = stem_helper.VideoModelStem( + dim_in=cfg.DATA.INPUT_CHANNEL_NUM, + dim_out=[width_per_group], + kernel=[temp_kernel[0][0] + [7, 7]], + stride=[[1, 2, 2]], + padding=[[temp_kernel[0][0][0] // 2, 3, 3]], + norm_module=self.norm_module, + ) + + self.s2 = resnet_helper.ResStage( + dim_in=[width_per_group], + dim_out=[width_per_group * 4], + dim_inner=[dim_inner], + temp_kernel_sizes=temp_kernel[1], + stride=cfg.RESNET.SPATIAL_STRIDES[0], + num_blocks=[d2], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0], + nonlocal_inds=cfg.NONLOCAL.LOCATION[0], + nonlocal_group=cfg.NONLOCAL.GROUP[0], + nonlocal_pool=cfg.NONLOCAL.POOL[0], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[0], + norm_module=self.norm_module, + ) + + for pathway in range(self.num_pathways): + pool = nn.MaxPool3d( + kernel_size=pool_size[pathway], + stride=pool_size[pathway], + padding=[0, 0, 0], + ) + self.add_module("pathway{}_pool".format(pathway), pool) + + self.s3 = resnet_helper.ResStage( + dim_in=[width_per_group * 4], + dim_out=[width_per_group * 8], + dim_inner=[dim_inner * 2], + temp_kernel_sizes=temp_kernel[2], + stride=cfg.RESNET.SPATIAL_STRIDES[1], + num_blocks=[d3], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1], + nonlocal_inds=cfg.NONLOCAL.LOCATION[1], + nonlocal_group=cfg.NONLOCAL.GROUP[1], + nonlocal_pool=cfg.NONLOCAL.POOL[1], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[1], + norm_module=self.norm_module, + ) + + self.s4 = resnet_helper.ResStage( + dim_in=[width_per_group * 8], + dim_out=[width_per_group * 16], + dim_inner=[dim_inner * 4], + temp_kernel_sizes=temp_kernel[3], + stride=cfg.RESNET.SPATIAL_STRIDES[2], + num_blocks=[d4], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2], + nonlocal_inds=cfg.NONLOCAL.LOCATION[2], + nonlocal_group=cfg.NONLOCAL.GROUP[2], + nonlocal_pool=cfg.NONLOCAL.POOL[2], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[2], + norm_module=self.norm_module, + ) + + # self.s5 = resnet_helper.ResStage( + # dim_in=[width_per_group * 16], + # dim_out=[width_per_group * 32], + # dim_inner=[dim_inner * 8], + # temp_kernel_sizes=temp_kernel[4], + # stride=cfg.RESNET.SPATIAL_STRIDES[3], + # num_blocks=[d5], + # num_groups=[num_groups], + # num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3], + # nonlocal_inds=cfg.NONLOCAL.LOCATION[3], + # nonlocal_group=cfg.NONLOCAL.GROUP[3], + # nonlocal_pool=cfg.NONLOCAL.POOL[3], + # instantiation=cfg.NONLOCAL.INSTANTIATION, + 
# trans_func_name=cfg.RESNET.TRANS_FUNC, + # stride_1x1=cfg.RESNET.STRIDE_1X1, + # inplace_relu=cfg.RESNET.INPLACE_RELU, + # dilation=cfg.RESNET.SPATIAL_DILATIONS[3], + # norm_module=self.norm_module, + # ) + self.labels=["rotate","light","skip"] + self.dual_define("t4",self.labels,LightDecoderBlock(width_per_group * 16,width_per_group * 8,width_per_group * 4)) + self.dual_define("t3",self.labels,LightDecoderBlock(width_per_group * 4,width_per_group * 4, 128)) + self.dual_define("conv1x1",self.labels,nn.Sequential( + nn.Conv3d(128+width_per_group, 64, kernel_size=(1, 1, 1), stride=1, padding=0), + nn.BatchNorm3d(64), + nn.ReLU(), + nn.Conv3d(64, 1, kernel_size=(1, 1, 1), stride=1, padding=0), + )) + + self.linear = nn.Sequential(nn.Linear(1, 1)) + + def forward_plus(self, x, y, net): + return [net(x)[0] + y[0]] + + + def dual_define(self,name,labels,net): + for label in labels: + self.add_module(f"{name}_{label}",copy.deepcopy(net)) + + + + def upsample(self, x, dims=["space"]): + ori_size = x[0].shape[2:5] + t, h, w = ori_size + if "space" in dims: + h = 2 * h + w = 2 * w + if "time" in dims: + t = 2 * t + size = (t, h, w) + return [F.interpolate(x[0], size)] + + def concat(self,x,y): + return [torch.cat([x[0],y[0]],1)] + + def get_detach_var(self,x): + return [t.detach() for t in x] + + # @torchsnooper.snoop() + def forward(self, x, freeze_backbone=False): + x1 = self.s1(x) # 1,64,8,56,56 + x2 = self.s2(x1) # 1,256,8,56,56 + x3 = self.s3(x2) # 1,512,8,28, 28 + x = self.s4(x3) # 1,1024,8,14,14 + assert isinstance(freeze_backbone,bool) + if freeze_backbone: + x=self.get_detach_var(x) + x1=self.get_detach_var(x1) + x2=self.get_detach_var(x2) + x3=self.get_detach_var(x3) + + x = self.upsample(x) # 1,1024, 8, 28, 28 + x = self.concat(x3,x)# 1,1024+512, 8, 28, 28 + x=[self.forward_branch(x,x1,x2,label) for label in self.labels] + x=torch.cat(x,1) + x=torch.sigmoid(x) + out = x.mean([3, 4]).view(-1, 1)*100 # 1,2,8,56,56 + out = self.linear(out) + out = out.view(x.size(0), -1) + out = torch.sigmoid(out) + return x,out + + + + def forward_branch(self,x,x1,x2,label): + t4=getattr(self,f"t4_{label}") + x = t4(x[0])# 1,256, 8, 28, 28 + x = self.upsample([x]) # 1,256, 8, 56, 56 + x = self.concat(x2,x)# 1,256+256, 8, 56, 56 + t3= getattr(self,f"t3_{label}") + x = t3(x[0]) # 1,128, 8, 56, 56 + x = self.concat(x1,[x]) # 1,192, 8, 56, 56 + conv1x1=getattr(self,f"conv1x1_{label}") + x = conv1x1(x[0]) # 1,2,8,56,56 + return x + + + +@MODEL_REGISTRY.register() +class ResUNetContinus(nn.Module): + """ + ResNet model builder. It builds a ResNet like network backbone without + lateral connection (C2D, I3D, Slow). + + Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. + "SlowFast networks for video recognition." + https://arxiv.org/pdf/1812.03982.pdf + + Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He. + "Non-local neural networks." + https://arxiv.org/pdf/1711.07971.pdf + """ + + def __init__(self, cfg): + """ + The `__init__` method of any subclass should also contain these + arguments. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. 
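+        How these decoders collapse their dense maps into clip-level scores
+        (shape-only sketch; batch 2, three labels and 8 frames are
+        assumptions, and the Linear is an untrained stand-in for self.linear)::
+
+            import torch
+            import torch.nn as nn
+
+            maps = torch.sigmoid(torch.randn(2, 3, 8, 56, 56))   # B, L, T, H, W
+            pooled = maps.mean([3, 4]).view(-1, 1) * 100          # [2*3*8, 1]
+            scores = torch.sigmoid(nn.Linear(1, 1)(pooled)).view(2, -1)
+            # scores: [2, 24], one value per (label, frame) for each clip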
+ """ + super(ResUNetContinus, self).__init__() + self.norm_module = get_norm(cfg) + self.enable_detection = cfg.DETECTION.ENABLE + self.enable_jitter = cfg.JITTER.ENABLE + self.num_pathways = 1 + assert cfg.DATA.TRAIN_CROP_SIZE == cfg.DATA.TEST_CROP_SIZE + self.image_size = cfg.DATA.TRAIN_CROP_SIZE + self.clip_size = cfg.DATA.NUM_FRAMES + self._construct_network(cfg) + init_helper.init_weights( + self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN + ) + + def _construct_network(self, cfg): + """ + Builds a single pathway ResNet model. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. + """ + assert cfg.MODEL.ARCH in _POOL1.keys() + pool_size = _POOL1[cfg.MODEL.ARCH] + self.cfg = cfg + assert len({len(pool_size), self.num_pathways}) == 1 + assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys() + + (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH] + + num_groups = cfg.RESNET.NUM_GROUPS + width_per_group = cfg.RESNET.WIDTH_PER_GROUP + dim_inner = num_groups * width_per_group + + temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH] + + self.s1 = stem_helper.VideoModelStem( + dim_in=cfg.DATA.INPUT_CHANNEL_NUM, + dim_out=[width_per_group], + kernel=[temp_kernel[0][0] + [7, 7]], + stride=[[1, 2, 2]], + padding=[[temp_kernel[0][0][0] // 2, 3, 3]], + norm_module=self.norm_module, + ) + + self.s2 = resnet_helper.ResStage( + dim_in=[width_per_group], + dim_out=[width_per_group * 4], + dim_inner=[dim_inner], + temp_kernel_sizes=temp_kernel[1], + stride=cfg.RESNET.SPATIAL_STRIDES[0], + num_blocks=[d2], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0], + nonlocal_inds=cfg.NONLOCAL.LOCATION[0], + nonlocal_group=cfg.NONLOCAL.GROUP[0], + nonlocal_pool=cfg.NONLOCAL.POOL[0], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[0], + norm_module=self.norm_module, + ) + + for pathway in range(self.num_pathways): + pool = nn.MaxPool3d( + kernel_size=pool_size[pathway], + stride=pool_size[pathway], + padding=[0, 0, 0], + ) + self.add_module("pathway{}_pool".format(pathway), pool) + + self.s3 = resnet_helper.ResStage( + dim_in=[width_per_group * 4], + dim_out=[width_per_group * 8], + dim_inner=[dim_inner * 2], + temp_kernel_sizes=temp_kernel[2], + stride=cfg.RESNET.SPATIAL_STRIDES[1], + num_blocks=[d3], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1], + nonlocal_inds=cfg.NONLOCAL.LOCATION[1], + nonlocal_group=cfg.NONLOCAL.GROUP[1], + nonlocal_pool=cfg.NONLOCAL.POOL[1], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[1], + norm_module=self.norm_module, + ) + + self.s4 = resnet_helper.ResStage( + dim_in=[width_per_group * 8], + dim_out=[width_per_group * 16], + dim_inner=[dim_inner * 4], + temp_kernel_sizes=temp_kernel[3], + stride=cfg.RESNET.SPATIAL_STRIDES[2], + num_blocks=[d4], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2], + nonlocal_inds=cfg.NONLOCAL.LOCATION[2], + nonlocal_group=cfg.NONLOCAL.GROUP[2], + nonlocal_pool=cfg.NONLOCAL.POOL[2], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + 
dilation=cfg.RESNET.SPATIAL_DILATIONS[2], + norm_module=self.norm_module, + ) + + # self.s5 = resnet_helper.ResStage( + # dim_in=[width_per_group * 16], + # dim_out=[width_per_group * 32], + # dim_inner=[dim_inner * 8], + # temp_kernel_sizes=temp_kernel[4], + # stride=cfg.RESNET.SPATIAL_STRIDES[3], + # num_blocks=[d5], + # num_groups=[num_groups], + # num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3], + # nonlocal_inds=cfg.NONLOCAL.LOCATION[3], + # nonlocal_group=cfg.NONLOCAL.GROUP[3], + # nonlocal_pool=cfg.NONLOCAL.POOL[3], + # instantiation=cfg.NONLOCAL.INSTANTIATION, + # trans_func_name=cfg.RESNET.TRANS_FUNC, + # stride_1x1=cfg.RESNET.STRIDE_1X1, + # inplace_relu=cfg.RESNET.INPLACE_RELU, + # dilation=cfg.RESNET.SPATIAL_DILATIONS[3], + # norm_module=self.norm_module, + # ) + self.labels=["all"] + self.dual_define("t4",self.labels,LightDecoderBlock(width_per_group * 16,width_per_group * 8,width_per_group * 4)) + self.dual_define("t3",self.labels,LightDecoderBlock(width_per_group * 4,width_per_group * 4, 128)) + self.dual_define("conv1x1",self.labels,nn.Sequential( + nn.Conv3d(128+width_per_group, 64, kernel_size=(1, 1, 1), stride=1, padding=0), + nn.BatchNorm3d(64), + nn.ReLU(), + nn.Conv3d(64, 1, kernel_size=(1, 1, 1), stride=1, padding=0), + )) + + self.linear = nn.Sequential(nn.Linear(1, 1)) + + def forward_plus(self, x, y, net): + return [net(x)[0] + y[0]] + + + def dual_define(self,name,labels,net): + for label in labels: + self.add_module(f"{name}_{label}",copy.deepcopy(net)) + + + + def upsample(self, x, dims=["space"]): + ori_size = x[0].shape[2:5] + t, h, w = ori_size + if "space" in dims: + h = 2 * h + w = 2 * w + if "time" in dims: + t = 2 * t + size = (t, h, w) + return [F.interpolate(x[0], size)] + + def concat(self,x,y): + return [torch.cat([x[0],y[0]],1)] + + def get_detach_var(self,x): + return [t.detach() for t in x] + + # @torchsnooper.snoop() + def forward(self, x, freeze_backbone=False): + x1 = self.s1(x) # 1,64,8,56,56 + x2 = self.s2(x1) # 1,256,8,56,56 + x3 = self.s3(x2) # 1,512,8,28, 28 + x = self.s4(x3) # 1,1024,8,14,14 + assert isinstance(freeze_backbone,bool) + if freeze_backbone: + x=self.get_detach_var(x) + x1=self.get_detach_var(x1) + x2=self.get_detach_var(x2) + x3=self.get_detach_var(x3) + + x = self.upsample(x) # 1,1024, 8, 28, 28 + x = self.concat(x3,x)# 1,1024+512, 8, 28, 28 + x=[self.forward_branch(x,x1,x2,label) for label in self.labels] + x=torch.cat(x,1) + x=torch.sigmoid(x) + out = x.mean([3, 4]).view(-1, 1)*100 # 1,2,8,56,56 + out = self.linear(out) + out = out.view(x.size(0), -1) + out = torch.sigmoid(out) + return x,out + + + def forward_branch(self,x,x1,x2,label): + t4= getattr(self,f"t4_{label}") + x = t4(x[0])# 1,256, 8, 28, 28 + x = self.upsample([x]) # 1,256, 8, 56, 56 + x = self.concat(x2,x)# 1,256+256, 8, 56, 56 + t3= getattr(self,f"t3_{label}") + x = t3(x[0]) # 1,128, 8, 56, 56 + x = self.concat(x1,[x]) # 1,192, 8, 56, 56 + conv1x1=getattr(self,f"conv1x1_{label}") + x = conv1x1(x[0]) # 1,2,8,56,56 + return x + + + + +@MODEL_REGISTRY.register() +class ResUNetCommon(nn.Module): + """ + ResNet model builder. It builds a ResNet like network backbone without + lateral connection (C2D, I3D, Slow). + + Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. + "SlowFast networks for video recognition." + https://arxiv.org/pdf/1812.03982.pdf + + Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He. + "Non-local neural networks." 
+ https://arxiv.org/pdf/1711.07971.pdf + """ + + def __init__(self, cfg): + """ + The `__init__` method of any subclass should also contain these + arguments. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. + """ + super(ResUNetCommon, self).__init__() + self.norm_module = get_norm(cfg) + self.enable_detection = cfg.DETECTION.ENABLE + self.enable_jitter = cfg.JITTER.ENABLE + self.num_pathways = 1 + assert cfg.DATA.TRAIN_CROP_SIZE == cfg.DATA.TEST_CROP_SIZE + self.image_size = cfg.DATA.TRAIN_CROP_SIZE + self.clip_size = cfg.DATA.NUM_FRAMES + self._construct_network(cfg) + init_helper.init_weights( + self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN + ) + + def _construct_network(self, cfg): + """ + Builds a single pathway ResNet model. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. + """ + assert cfg.MODEL.ARCH in _POOL1.keys() + pool_size = _POOL1[cfg.MODEL.ARCH] + self.cfg = cfg + assert len({len(pool_size), self.num_pathways}) == 1 + assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys() + + (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH] + + num_groups = cfg.RESNET.NUM_GROUPS + width_per_group = cfg.RESNET.WIDTH_PER_GROUP + dim_inner = num_groups * width_per_group + + temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH] + + self.s1 = stem_helper.VideoModelStem( + dim_in=cfg.DATA.INPUT_CHANNEL_NUM, + dim_out=[width_per_group], + kernel=[temp_kernel[0][0] + [7, 7]], + stride=[[1, 2, 2]], + padding=[[temp_kernel[0][0][0] // 2, 3, 3]], + norm_module=self.norm_module, + ) + + self.s2 = resnet_helper.ResStage( + dim_in=[width_per_group], + dim_out=[width_per_group * 4], + dim_inner=[dim_inner], + temp_kernel_sizes=temp_kernel[1], + stride=cfg.RESNET.SPATIAL_STRIDES[0], + num_blocks=[d2], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0], + nonlocal_inds=cfg.NONLOCAL.LOCATION[0], + nonlocal_group=cfg.NONLOCAL.GROUP[0], + nonlocal_pool=cfg.NONLOCAL.POOL[0], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[0], + norm_module=self.norm_module, + ) + + for pathway in range(self.num_pathways): + pool = nn.MaxPool3d( + kernel_size=pool_size[pathway], + stride=pool_size[pathway], + padding=[0, 0, 0], + ) + self.add_module("pathway{}_pool".format(pathway), pool) + + self.s3 = resnet_helper.ResStage( + dim_in=[width_per_group * 4], + dim_out=[width_per_group * 8], + dim_inner=[dim_inner * 2], + temp_kernel_sizes=temp_kernel[2], + stride=cfg.RESNET.SPATIAL_STRIDES[1], + num_blocks=[d3], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1], + nonlocal_inds=cfg.NONLOCAL.LOCATION[1], + nonlocal_group=cfg.NONLOCAL.GROUP[1], + nonlocal_pool=cfg.NONLOCAL.POOL[1], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[1], + norm_module=self.norm_module, + ) + + self.s4 = resnet_helper.ResStage( + dim_in=[width_per_group * 8], + dim_out=[width_per_group * 16], + dim_inner=[dim_inner * 4], + temp_kernel_sizes=temp_kernel[3], + stride=cfg.RESNET.SPATIAL_STRIDES[2], + num_blocks=[d4], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2], + nonlocal_inds=cfg.NONLOCAL.LOCATION[2], + 
nonlocal_group=cfg.NONLOCAL.GROUP[2], + nonlocal_pool=cfg.NONLOCAL.POOL[2], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[2], + norm_module=self.norm_module, + ) + + # self.s5 = resnet_helper.ResStage( + # dim_in=[width_per_group * 16], + # dim_out=[width_per_group * 32], + # dim_inner=[dim_inner * 8], + # temp_kernel_sizes=temp_kernel[4], + # stride=cfg.RESNET.SPATIAL_STRIDES[3], + # num_blocks=[d5], + # num_groups=[num_groups], + # num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3], + # nonlocal_inds=cfg.NONLOCAL.LOCATION[3], + # nonlocal_group=cfg.NONLOCAL.GROUP[3], + # nonlocal_pool=cfg.NONLOCAL.POOL[3], + # instantiation=cfg.NONLOCAL.INSTANTIATION, + # trans_func_name=cfg.RESNET.TRANS_FUNC, + # stride_1x1=cfg.RESNET.STRIDE_1X1, + # inplace_relu=cfg.RESNET.INPLACE_RELU, + # dilation=cfg.RESNET.SPATIAL_DILATIONS[3], + # norm_module=self.norm_module, + # ) + self.labels=cfg.RESNET.LABELS + self.dual_define("t4",self.labels,LightDecoderBlock(width_per_group * 16,width_per_group * 8,width_per_group * 4)) + self.dual_define("t3",self.labels,LightDecoderBlock(width_per_group * 4,width_per_group * 4, 128)) + self.dual_define("conv1x1",self.labels,nn.Sequential( + nn.Conv3d(128+width_per_group, 64, kernel_size=(1, 1, 1), stride=1, padding=0), + nn.BatchNorm3d(64), + nn.ReLU(), + nn.Conv3d(64, 1, kernel_size=(1, 1, 1), stride=1, padding=0), + )) + + self.linear = nn.Linear(1, 2) + + def forward_plus(self, x, y, net): + return [net(x)[0] + y[0]] + + + def dual_define(self,name,labels,net): + for label in labels: + self.add_module(f"{name}_{label}",copy.deepcopy(net)) + + + def upsample(self, x, dims=["space"]): + ori_size = x[0].shape[2:5] + t, h, w = ori_size + if "space" in dims: + h = 2 * h + w = 2 * w + if "time" in dims: + t = 2 * t + size = (t, h, w) + return [F.interpolate(x[0], size)] + + def concat(self,x,y): + return [torch.cat([x[0],y[0]],1)] + + def get_detach_var(self,x): + return [t.detach() for t in x] + + # @torchsnooper.snoop() + def forward(self, x, freeze_backbone=False): + x = self.get_detach_var(x) + x1 = self.s1(x) # 1,64,8,56,56 + x2 = self.s2(x1) # 1,256,8,56,56 + x3 = self.s3(x2) # 1,512,8,28, 28 + feat= self.s4(x3) # 1,1024,8,14,14 + assert isinstance(freeze_backbone,bool) + if freeze_backbone: + feat=self.get_detach_var(feat) + x1=self.get_detach_var(x1) + x2=self.get_detach_var(x2) + x3=self.get_detach_var(x3) + + feat = self.upsample(feat) # 1,1024, 8, 28, 28 + feat = self.concat(x3,feat)# 1,1024+512, 8, 28, 28 + reg_out=[self.forward_branch(feat,x1,x2,label) for label in self.labels] + reg_out=torch.cat(reg_out,1) + reg_out=torch.sigmoid(reg_out) + class_out = reg_out.mean([3, 4]).view(-1, 1)*100 # 1,2,8,56,56 + class_out = self.linear(class_out) + class_out = class_out.view(reg_out.size(0),len(self.labels),-1) + class_out = class_out + return reg_out,class_out + + + def forward_branch(self,feat,x1,x2,label): + t4= getattr(self,f"t4_{label}") + feat = t4(feat[0])# 1,256, 8, 28, 28 + feat = self.upsample([feat]) # 1,256, 8, 56, 56 + feat = self.concat(x2,feat)# 1,256+256, 8, 56, 56 + t3= getattr(self,f"t3_{label}") + feat = t3(feat[0]) # 1,128, 8, 56, 56 + feat = self.concat(x1,[feat]) # 1,192, 8, 56, 56 + conv1x1=getattr(self,f"conv1x1_{label}") + feat = conv1x1(feat[0]) # 1,2,8,56,56 + return feat + + + + +@MODEL_REGISTRY.register() +class ResUNetCommon2(nn.Module): + """ + ResNet model builder. 
It builds a ResNet like network backbone without + lateral connection (C2D, I3D, Slow). + + Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. + "SlowFast networks for video recognition." + https://arxiv.org/pdf/1812.03982.pdf + + Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He. + "Non-local neural networks." + https://arxiv.org/pdf/1711.07971.pdf + """ + + def __init__(self, cfg): + """ + The `__init__` method of any subclass should also contain these + arguments. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. + """ + super(ResUNetCommon2, self).__init__() + self.norm_module = get_norm(cfg) + self.enable_detection = cfg.DETECTION.ENABLE + self.enable_jitter = cfg.JITTER.ENABLE + self.num_pathways = 1 + assert cfg.DATA.TRAIN_CROP_SIZE == cfg.DATA.TEST_CROP_SIZE + self.image_size = cfg.DATA.TRAIN_CROP_SIZE + self.clip_size = cfg.DATA.NUM_FRAMES + self._construct_network(cfg) + init_helper.init_weights( + self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN + ) + + def _construct_network(self, cfg): + """ + Builds a single pathway ResNet model. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. + """ + assert cfg.MODEL.ARCH in _POOL1.keys() + pool_size = _POOL1[cfg.MODEL.ARCH] + self.cfg = cfg + assert len({len(pool_size), self.num_pathways}) == 1 + assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys() + + (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH] + + num_groups = cfg.RESNET.NUM_GROUPS + width_per_group = cfg.RESNET.WIDTH_PER_GROUP + dim_inner = num_groups * width_per_group + + temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH] + + self.s1 = stem_helper.VideoModelStem( + dim_in=cfg.DATA.INPUT_CHANNEL_NUM, + dim_out=[width_per_group], + kernel=[temp_kernel[0][0] + [7, 7]], + stride=[[1, 2, 2]], + padding=[[temp_kernel[0][0][0] // 2, 3, 3]], + norm_module=self.norm_module, + ) + + self.s2 = resnet_helper.ResStage( + dim_in=[width_per_group], + dim_out=[width_per_group * 4], + dim_inner=[dim_inner], + temp_kernel_sizes=temp_kernel[1], + stride=cfg.RESNET.SPATIAL_STRIDES[0], + num_blocks=[d2], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0], + nonlocal_inds=cfg.NONLOCAL.LOCATION[0], + nonlocal_group=cfg.NONLOCAL.GROUP[0], + nonlocal_pool=cfg.NONLOCAL.POOL[0], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[0], + norm_module=self.norm_module, + ) + + for pathway in range(self.num_pathways): + pool = nn.MaxPool3d( + kernel_size=pool_size[pathway], + stride=pool_size[pathway], + padding=[0, 0, 0], + ) + self.add_module("pathway{}_pool".format(pathway), pool) + + self.s3 = resnet_helper.ResStage( + dim_in=[width_per_group * 4], + dim_out=[width_per_group * 8], + dim_inner=[dim_inner * 2], + temp_kernel_sizes=temp_kernel[2], + stride=cfg.RESNET.SPATIAL_STRIDES[1], + num_blocks=[d3], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1], + nonlocal_inds=cfg.NONLOCAL.LOCATION[1], + nonlocal_group=cfg.NONLOCAL.GROUP[1], + nonlocal_pool=cfg.NONLOCAL.POOL[1], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[1], + norm_module=self.norm_module, + ) + + self.s4 = resnet_helper.ResStage( 
+ dim_in=[width_per_group * 8], + dim_out=[width_per_group * 16], + dim_inner=[dim_inner * 4], + temp_kernel_sizes=temp_kernel[3], + stride=cfg.RESNET.SPATIAL_STRIDES[2], + num_blocks=[d4], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2], + nonlocal_inds=cfg.NONLOCAL.LOCATION[2], + nonlocal_group=cfg.NONLOCAL.GROUP[2], + nonlocal_pool=cfg.NONLOCAL.POOL[2], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[2], + norm_module=self.norm_module, + ) + + # self.s5 = resnet_helper.ResStage( + # dim_in=[width_per_group * 16], + # dim_out=[width_per_group * 32], + # dim_inner=[dim_inner * 8], + # temp_kernel_sizes=temp_kernel[4], + # stride=cfg.RESNET.SPATIAL_STRIDES[3], + # num_blocks=[d5], + # num_groups=[num_groups], + # num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3], + # nonlocal_inds=cfg.NONLOCAL.LOCATION[3], + # nonlocal_group=cfg.NONLOCAL.GROUP[3], + # nonlocal_pool=cfg.NONLOCAL.POOL[3], + # instantiation=cfg.NONLOCAL.INSTANTIATION, + # trans_func_name=cfg.RESNET.TRANS_FUNC, + # stride_1x1=cfg.RESNET.STRIDE_1X1, + # inplace_relu=cfg.RESNET.INPLACE_RELU, + # dilation=cfg.RESNET.SPATIAL_DILATIONS[3], + # norm_module=self.norm_module, + # ) + self.labels=cfg.RESNET.LABELS + self.dual_define("t4",self.labels,LightDecoderBlock(width_per_group * 16,width_per_group * 8,width_per_group * 4)) + self.dual_define("t3",self.labels,LightDecoderBlock(width_per_group * 4,width_per_group * 4, 128)) + self.dual_define("conv1x1",self.labels,nn.Sequential( + nn.Conv3d(128+width_per_group, 64, kernel_size=(1, 1, 1), stride=1, padding=0), + nn.BatchNorm3d(64), + nn.ReLU(), + nn.Conv3d(64, 1, kernel_size=(1, 1, 1), stride=1, padding=0), + )) + + self.linear = nn.Linear(1, 1) + + def forward_plus(self, x, y, net): + return [net(x)[0] + y[0]] + + + def dual_define(self,name,labels,net): + for label in labels: + self.add_module(f"{name}_{label}",copy.deepcopy(net)) + + + def upsample(self, x, dims=["space"]): + ori_size = x[0].shape[2:5] + t, h, w = ori_size + if "space" in dims: + h = 2 * h + w = 2 * w + if "time" in dims: + t = 2 * t + size = (t, h, w) + return [F.interpolate(x[0], size)] + + def concat(self,x,y): + return [torch.cat([x[0],y[0]],1)] + + def get_detach_var(self,x): + return [t.detach() for t in x] + + # @torchsnooper.snoop() + def forward(self, x, freeze_backbone=False): + x = self.get_detach_var(x) + x1 = self.s1(x) # 1,64,8,56,56 + x2 = self.s2(x1) # 1,256,8,56,56 + x3 = self.s3(x2) # 1,512,8,28, 28 + feat= self.s4(x3) # 1,1024,8,14,14 + assert isinstance(freeze_backbone,bool) + if freeze_backbone: + feat=self.get_detach_var(feat) + x1=self.get_detach_var(x1) + x2=self.get_detach_var(x2) + x3=self.get_detach_var(x3) + + feat = self.upsample(feat) # 1,1024, 8, 28, 28 + feat = self.concat(x3,feat)# 1,1024+512, 8, 28, 28 + reg_out=[self.forward_branch(feat,x1,x2,label) for label in self.labels] + reg_out=torch.cat(reg_out,1) + reg_out=torch.sigmoid(reg_out) + class_out = reg_out.mean([3, 4]).view(-1, 1)*100 # 1,2,8,56,56 + class_out = self.linear(class_out) + class_out = class_out.view(reg_out.size(0),len(self.labels),-1) + class_out = torch.sigmoid(class_out) + return reg_out,class_out + + + def forward_branch(self,feat,x1,x2,label): + t4= getattr(self,f"t4_{label}") + feat = t4(feat[0])# 1,256, 8, 28, 28 + feat = self.upsample([feat]) # 1,256, 8, 56, 56 + feat = self.concat(x2,feat)# 1,256+256, 8, 
56, 56 + t3= getattr(self,f"t3_{label}") + feat = t3(feat[0]) # 1,128, 8, 56, 56 + feat = self.concat(x1,[feat]) # 1,192, 8, 56, 56 + conv1x1=getattr(self,f"conv1x1_{label}") + feat = conv1x1(feat[0]) # 1,2,8,56,56 + return feat + + + +@MODEL_REGISTRY.register() +class ResUNetStrong(nn.Module): + """ + ResNet model builder. It builds a ResNet like network backbone without + lateral connection (C2D, I3D, Slow). + + Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. + "SlowFast networks for video recognition." + https://arxiv.org/pdf/1812.03982.pdf + + Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He. + "Non-local neural networks." + https://arxiv.org/pdf/1711.07971.pdf + """ + + def __init__(self, cfg): + """ + The `__init__` method of any subclass should also contain these + arguments. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. + """ + super(ResUNetStrong, self).__init__() + self.norm_module = get_norm(cfg) + self.enable_detection = cfg.DETECTION.ENABLE + self.enable_jitter = cfg.JITTER.ENABLE + self.num_pathways = 1 + assert cfg.DATA.TRAIN_CROP_SIZE == cfg.DATA.TEST_CROP_SIZE + self.image_size = cfg.DATA.TRAIN_CROP_SIZE + self.clip_size = cfg.DATA.NUM_FRAMES + self._construct_network(cfg) + init_helper.init_weights( + self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN + ) + + def _construct_network(self, cfg): + """ + Builds a single pathway ResNet model. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. + """ + assert cfg.MODEL.ARCH in _POOL1.keys() + pool_size = _POOL1[cfg.MODEL.ARCH] + self.cfg = cfg + assert len({len(pool_size), self.num_pathways}) == 1 + assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys() + + (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH] + + num_groups = cfg.RESNET.NUM_GROUPS + width_per_group = cfg.RESNET.WIDTH_PER_GROUP + dim_inner = num_groups * width_per_group + + temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH] + + self.s1 = stem_helper.VideoModelStem( + dim_in=cfg.DATA.INPUT_CHANNEL_NUM, + dim_out=[width_per_group], + kernel=[temp_kernel[0][0] + [7, 7]], + stride=[[1, 2, 2]], + padding=[[temp_kernel[0][0][0] // 2, 3, 3]], + norm_module=self.norm_module, + ) + + self.s2 = resnet_helper.ResStage( + dim_in=[width_per_group], + dim_out=[width_per_group * 4], + dim_inner=[dim_inner], + temp_kernel_sizes=temp_kernel[1], + stride=cfg.RESNET.SPATIAL_STRIDES[0], + num_blocks=[d2], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0], + nonlocal_inds=cfg.NONLOCAL.LOCATION[0], + nonlocal_group=cfg.NONLOCAL.GROUP[0], + nonlocal_pool=cfg.NONLOCAL.POOL[0], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[0], + norm_module=self.norm_module, + ) + + for pathway in range(self.num_pathways): + pool = nn.MaxPool3d( + kernel_size=pool_size[pathway], + stride=pool_size[pathway], + padding=[0, 0, 0], + ) + self.add_module("pathway{}_pool".format(pathway), pool) + + self.s3 = resnet_helper.ResStage( + dim_in=[width_per_group * 4], + dim_out=[width_per_group * 8], + dim_inner=[dim_inner * 2], + temp_kernel_sizes=temp_kernel[2], + stride=cfg.RESNET.SPATIAL_STRIDES[1], + num_blocks=[d3], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1], + nonlocal_inds=cfg.NONLOCAL.LOCATION[1], + 
nonlocal_group=cfg.NONLOCAL.GROUP[1], + nonlocal_pool=cfg.NONLOCAL.POOL[1], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[1], + norm_module=self.norm_module, + ) + + self.s4 = resnet_helper.ResStage( + dim_in=[width_per_group * 8], + dim_out=[width_per_group * 16], + dim_inner=[dim_inner * 4], + temp_kernel_sizes=temp_kernel[3], + stride=cfg.RESNET.SPATIAL_STRIDES[2], + num_blocks=[d4], + num_groups=[num_groups], + num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2], + nonlocal_inds=cfg.NONLOCAL.LOCATION[2], + nonlocal_group=cfg.NONLOCAL.GROUP[2], + nonlocal_pool=cfg.NONLOCAL.POOL[2], + instantiation=cfg.NONLOCAL.INSTANTIATION, + trans_func_name=cfg.RESNET.TRANS_FUNC, + stride_1x1=cfg.RESNET.STRIDE_1X1, + inplace_relu=cfg.RESNET.INPLACE_RELU, + dilation=cfg.RESNET.SPATIAL_DILATIONS[2], + norm_module=self.norm_module, + ) + + # self.s5 = resnet_helper.ResStage( + # dim_in=[width_per_group * 16], + # dim_out=[width_per_group * 32], + # dim_inner=[dim_inner * 8], + # temp_kernel_sizes=temp_kernel[4], + # stride=cfg.RESNET.SPATIAL_STRIDES[3], + # num_blocks=[d5], + # num_groups=[num_groups], + # num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3], + # nonlocal_inds=cfg.NONLOCAL.LOCATION[3], + # nonlocal_group=cfg.NONLOCAL.GROUP[3], + # nonlocal_pool=cfg.NONLOCAL.POOL[3], + # instantiation=cfg.NONLOCAL.INSTANTIATION, + # trans_func_name=cfg.RESNET.TRANS_FUNC, + # stride_1x1=cfg.RESNET.STRIDE_1X1, + # inplace_relu=cfg.RESNET.INPLACE_RELU, + # dilation=cfg.RESNET.SPATIAL_DILATIONS[3], + # norm_module=self.norm_module, + # ) + + self.labels=cfg.RESNET.LABELS + self.dual_define("t4",self.labels,ResDecoderBlock(width_per_group * 16,width_per_group * 8,width_per_group * 8)) + self.dual_define("t3",self.labels,ResDecoderBlock(width_per_group * 8,width_per_group * 4, 256)) + self.dual_define("conv1x1",self.labels,nn.Sequential( + nn.Conv3d(width_per_group*4+width_per_group, 128, kernel_size=(1, 1, 1), stride=1, padding=0), + nn.BatchNorm3d(128), + nn.ReLU(), + nn.Conv3d(128, 1, kernel_size=(1, 1, 1), stride=1, padding=0), + )) + + self.linear = nn.Linear(1, 1) + + def forward_plus(self, x, y, net): + return [net(x)[0] + y[0]] + + + def dual_define(self,name,labels,net): + for label in labels: + self.add_module(f"{name}_{label}",copy.deepcopy(net)) + + + def upsample(self, x, dims=["space"]): + ori_size = x[0].shape[2:5] + t, h, w = ori_size + if "space" in dims: + h = 2 * h + w = 2 * w + if "time" in dims: + t = 2 * t + size = (t, h, w) + return [F.interpolate(x[0], size)] + + def concat(self,x,y): + return [torch.cat([x[0],y[0]],1)] + + def get_detach_var(self,x): + return [t.detach() for t in x] + + # @torchsnooper.snoop() + def forward(self, x, freeze_backbone=False): + x = self.get_detach_var(x) + x1 = self.s1(x) # 1,64,8,56,56 + x2 = self.s2(x1) # 1,256,8,56,56 + x3 = self.s3(x2) # 1,512,8,28, 28 + feat= self.s4(x3) # 1,1024,8,14,14 + assert isinstance(freeze_backbone,bool) + if freeze_backbone: + feat=self.get_detach_var(feat) + x1=self.get_detach_var(x1) + x2=self.get_detach_var(x2) + x3=self.get_detach_var(x3) + + feat = self.upsample(feat) # 1,1024, 8, 28, 28 + feat = self.concat(x3,feat)# 1,1024+512, 8, 28, 28 + reg_out=[self.forward_branch(feat,x1,x2,label) for label in self.labels] + reg_out=torch.cat(reg_out,1) + reg_out=torch.sigmoid(reg_out) + class_out = reg_out.mean([3, 4]).view(-1, 1)*100 # 1,2,8,56,56 + class_out = 
self.linear(class_out) + class_out = class_out.view(reg_out.size(0),len(self.labels),-1) + class_out = torch.sigmoid(class_out) + return reg_out,class_out + + + def forward_branch(self,feat,x1,x2,label): + t4= getattr(self,f"t4_{label}") + feat = t4(feat[0])# 1,256, 8, 28, 28 + feat = self.upsample([feat]) # 1,256, 8, 56, 56 + feat = self.concat(x2,feat)# 1,256+256, 8, 56, 56 + t3= getattr(self,f"t3_{label}") + feat = t3(feat[0]) # 1,128, 8, 56, 56 + feat = self.concat(x1,[feat]) # 1,192, 8, 56, 56 + conv1x1=getattr(self,f"conv1x1_{label}") + feat = conv1x1(feat[0]) # 1,2,8,56,56 + return feat diff --git a/training/detectors/utils/slowfast/utils/__init__.py b/training/detectors/utils/slowfast/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8dbe96a785072a24a9bcc4841a1934024f2b06a1 --- /dev/null +++ b/training/detectors/utils/slowfast/utils/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. diff --git a/training/detectors/utils/slowfast/utils/ava_eval_helper.py b/training/detectors/utils/slowfast/utils/ava_eval_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..9e8ba5468077053a4dcf1920f25256a1a241f0b9 --- /dev/null +++ b/training/detectors/utils/slowfast/utils/ava_eval_helper.py @@ -0,0 +1,302 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################## +# +# Based on: +# -------------------------------------------------------- +# ActivityNet +# Copyright (c) 2015 ActivityNet +# Licensed under The MIT License +# [see https://github.com/activitynet/ActivityNet/blob/master/LICENSE for details] +# -------------------------------------------------------- + +"""Helper functions for AVA evaluation.""" + +from __future__ import ( + absolute_import, + division, + print_function, + unicode_literals, +) +import csv +import logging +import numpy as np +import pprint +import time +from collections import defaultdict +from fvcore.common.file_io import PathManager + +from slowfast.utils.ava_evaluation import ( + object_detection_evaluation, + standard_fields, +) + +logger = logging.getLogger(__name__) + + +def make_image_key(video_id, timestamp): + """Returns a unique identifier for a video id & timestamp.""" + return "%s,%04d" % (video_id, int(timestamp)) + + +def read_csv(csv_file, class_whitelist=None, load_score=False): + """Loads boxes and class labels from a CSV file in the AVA format. + CSV file format described at https://research.google.com/ava/download.html. + Args: + csv_file: A file object. + class_whitelist: If provided, boxes corresponding to (integer) class labels + not in this set are skipped. + Returns: + boxes: A dictionary mapping each unique image key (string) to a list of + boxes, given as coordinates [y1, x1, y2, x2]. 
+ labels: A dictionary mapping each unique image key (string) to a list of + integer class lables, matching the corresponding box in `boxes`. + scores: A dictionary mapping each unique image key (string) to a list of + score values lables, matching the corresponding label in `labels`. If + scores are not provided in the csv, then they will default to 1.0. + """ + boxes = defaultdict(list) + labels = defaultdict(list) + scores = defaultdict(list) + with PathManager.open(csv_file, "r") as f: + reader = csv.reader(f) + for row in reader: + assert len(row) in [7, 8], "Wrong number of columns: " + row + image_key = make_image_key(row[0], row[1]) + x1, y1, x2, y2 = [float(n) for n in row[2:6]] + action_id = int(row[6]) + if class_whitelist and action_id not in class_whitelist: + continue + score = 1.0 + if load_score: + score = float(row[7]) + boxes[image_key].append([y1, x1, y2, x2]) + labels[image_key].append(action_id) + scores[image_key].append(score) + return boxes, labels, scores + + +def read_exclusions(exclusions_file): + """Reads a CSV file of excluded timestamps. + Args: + exclusions_file: A file object containing a csv of video-id,timestamp. + Returns: + A set of strings containing excluded image keys, e.g. "aaaaaaaaaaa,0904", + or an empty set if exclusions file is None. + """ + excluded = set() + if exclusions_file: + with PathManager.open(exclusions_file, "r") as f: + reader = csv.reader(f) + for row in reader: + assert len(row) == 2, "Expected only 2 columns, got: " + row + excluded.add(make_image_key(row[0], row[1])) + return excluded + + +def read_labelmap(labelmap_file): + """Read label map and class ids.""" + + labelmap = [] + class_ids = set() + name = "" + class_id = "" + with PathManager.open(labelmap_file, "r") as f: + for line in f: + if line.startswith(" name:"): + name = line.split('"')[1] + elif line.startswith(" id:") or line.startswith(" label_id:"): + class_id = int(line.strip().split(" ")[-1]) + labelmap.append({"id": class_id, "name": name}) + class_ids.add(class_id) + return labelmap, class_ids + + +def evaluate_ava_from_files(labelmap, groundtruth, detections, exclusions): + """Run AVA evaluation given annotation/prediction files.""" + + categories, class_whitelist = read_labelmap(labelmap) + excluded_keys = read_exclusions(exclusions) + groundtruth = read_csv(groundtruth, class_whitelist, load_score=False) + detections = read_csv(detections, class_whitelist, load_score=True) + run_evaluation(categories, groundtruth, detections, excluded_keys) + + +def evaluate_ava( + preds, + original_boxes, + metadata, + excluded_keys, + class_whitelist, + categories, + groundtruth=None, + video_idx_to_name=None, + name="latest", +): + """Run AVA evaluation given numpy arrays.""" + + eval_start = time.time() + + detections = get_ava_eval_data( + preds, + original_boxes, + metadata, + class_whitelist, + video_idx_to_name=video_idx_to_name, + ) + + logger.info("Evaluating with %d unique GT frames." % len(groundtruth[0])) + logger.info( + "Evaluating with %d unique detection frames" % len(detections[0]) + ) + + write_results(detections, "detections_%s.csv" % name) + write_results(groundtruth, "groundtruth_%s.csv" % name) + + results = run_evaluation(categories, groundtruth, detections, excluded_keys) + + logger.info("AVA eval done in %f seconds." 
% (time.time() - eval_start)) + return results["PascalBoxes_Precision/mAP@0.5IOU"] + + +def run_evaluation( + categories, groundtruth, detections, excluded_keys, verbose=True +): + """AVA evaluation main logic.""" + + pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator( + categories + ) + + boxes, labels, _ = groundtruth + + gt_keys = [] + pred_keys = [] + + for image_key in boxes: + if image_key in excluded_keys: + logging.info( + ( + "Found excluded timestamp in ground truth: %s. " + "It will be ignored." + ), + image_key, + ) + continue + pascal_evaluator.add_single_ground_truth_image_info( + image_key, + { + standard_fields.InputDataFields.groundtruth_boxes: np.array( + boxes[image_key], dtype=float + ), + standard_fields.InputDataFields.groundtruth_classes: np.array( + labels[image_key], dtype=int + ), + standard_fields.InputDataFields.groundtruth_difficult: np.zeros( + len(boxes[image_key]), dtype=bool + ), + }, + ) + + gt_keys.append(image_key) + + boxes, labels, scores = detections + + for image_key in boxes: + if image_key in excluded_keys: + logging.info( + ( + "Found excluded timestamp in detections: %s. " + "It will be ignored." + ), + image_key, + ) + continue + pascal_evaluator.add_single_detected_image_info( + image_key, + { + standard_fields.DetectionResultFields.detection_boxes: np.array( + boxes[image_key], dtype=float + ), + standard_fields.DetectionResultFields.detection_classes: np.array( + labels[image_key], dtype=int + ), + standard_fields.DetectionResultFields.detection_scores: np.array( + scores[image_key], dtype=float + ), + }, + ) + + pred_keys.append(image_key) + + metrics = pascal_evaluator.evaluate() + + pprint.pprint(metrics, indent=2) + return metrics + + +def get_ava_eval_data( + scores, + boxes, + metadata, + class_whitelist, + verbose=False, + video_idx_to_name=None, +): + """ + Convert our data format into the data format used in official AVA + evaluation. + """ + + out_scores = defaultdict(list) + out_labels = defaultdict(list) + out_boxes = defaultdict(list) + count = 0 + for i in range(scores.shape[0]): + video_idx = int(np.round(metadata[i][0])) + sec = int(np.round(metadata[i][1])) + + video = video_idx_to_name[video_idx] + + key = video + "," + "%04d" % (sec) + batch_box = boxes[i].tolist() + # The first is batch idx. + batch_box = [batch_box[j] for j in [0, 2, 1, 4, 3]] + + one_scores = scores[i].tolist() + for cls_idx, score in enumerate(one_scores): + if cls_idx + 1 in class_whitelist: + out_scores[key].append(score) + out_labels[key].append(cls_idx + 1) + out_boxes[key].append(batch_box[1:]) + count += 1 + + return out_boxes, out_labels, out_scores + + +def write_results(detections, filename): + """Write prediction results into official formats.""" + start = time.time() + + boxes, labels, scores = detections + with PathManager.open(filename, "w") as f: + for key in boxes.keys(): + for box, label, score in zip(boxes[key], labels[key], scores[key]): + f.write( + "%s,%.03f,%.03f,%.03f,%.03f,%d,%.04f\n" + % (key, box[1], box[0], box[3], box[2], label, score) + ) + + logger.info("AVA results wrote to %s" % filename) + logger.info("\ttook %d seconds." % (time.time() - start)) diff --git a/training/detectors/utils/slowfast/utils/benchmark.py b/training/detectors/utils/slowfast/utils/benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..33e5fe9073ad61ecec737d6b4a6a2880eec15cb9 --- /dev/null +++ b/training/detectors/utils/slowfast/utils/benchmark.py @@ -0,0 +1,103 @@ +# Copyright (c) Facebook, Inc. 
and its affiliates. All Rights Reserved +""" +Functions for benchmarks. +""" + +import numpy as np +import pprint +import torch +import tqdm +from fvcore.common.timer import Timer + +import slowfast.utils.logging as logging +import slowfast.utils.misc as misc +from slowfast.datasets import loader +from slowfast.utils.env import setup_environment + +logger = logging.get_logger(__name__) + + +def benchmark_data_loading(cfg): + """ + Benchmark the speed of data loading in PySlowFast. + Args: + + cfg (CfgNode): configs. Details can be found in + slowfast/config/defaults.py + """ + # Set up environment. + setup_environment() + # Set random seed from configs. + np.random.seed(cfg.RNG_SEED) + torch.manual_seed(cfg.RNG_SEED) + + # Setup logging format. + logging.setup_logging(cfg.OUTPUT_DIR) + + # Print config. + logger.info("Benchmark data loading with config:") + logger.info(pprint.pformat(cfg)) + + timer = Timer() + dataloader = loader.construct_loader(cfg, "train") + logger.info( + "Initialize loader using {:.2f} seconds.".format(timer.seconds()) + ) + # Total batch size across different machines. + batch_size = cfg.TRAIN.BATCH_SIZE * cfg.NUM_SHARDS + log_period = cfg.BENCHMARK.LOG_PERIOD + epoch_times = [] + # Test for a few epochs. + for cur_epoch in range(cfg.BENCHMARK.NUM_EPOCHS): + timer = Timer() + timer_epoch = Timer() + iter_times = [] + if cfg.BENCHMARK.SHUFFLE: + loader.shuffle_dataset(dataloader, cur_epoch) + for cur_iter, _ in enumerate(tqdm.tqdm(dataloader)): + if cur_iter > 0 and cur_iter % log_period == 0: + iter_times.append(timer.seconds()) + ram_usage, ram_total = misc.cpu_mem_usage() + logger.info( + "Epoch {}: {} iters ({} videos) in {:.2f} seconds. " + "RAM Usage: {:.2f}/{:.2f} GB.".format( + cur_epoch, + log_period, + log_period * batch_size, + iter_times[-1], + ram_usage, + ram_total, + ) + ) + timer.reset() + epoch_times.append(timer_epoch.seconds()) + ram_usage, ram_total = misc.cpu_mem_usage() + logger.info( + "Epoch {}: in total {} iters ({} videos) in {:.2f} seconds. " + "RAM Usage: {:.2f}/{:.2f} GB.".format( + cur_epoch, + len(dataloader), + len(dataloader) * batch_size, + epoch_times[-1], + ram_usage, + ram_total, + ) + ) + logger.info( + "Epoch {}: on average every {} iters ({} videos) take {:.2f}/{:.2f} " + "(avg/std) seconds.".format( + cur_epoch, + log_period, + log_period * batch_size, + np.mean(iter_times), + np.std(iter_times), + ) + ) + logger.info( + "On average every epoch ({} videos) takes {:.2f}/{:.2f} " + "(avg/std) seconds.".format( + len(dataloader) * batch_size, + np.mean(epoch_times), + np.std(epoch_times), + ) + ) diff --git a/training/detectors/utils/slowfast/utils/bn_helper.py b/training/detectors/utils/slowfast/utils/bn_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..b18d8c76c10d7598db61ba8ca192314140f9ba79 --- /dev/null +++ b/training/detectors/utils/slowfast/utils/bn_helper.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""bn helper.""" + +import itertools +import torch + + +@torch.no_grad() +def compute_and_update_bn_stats(model, data_loader, num_batches=200): + """ + Compute and update the batch norm stats to make it more precise. During + training both bn stats and the weight are changing after every iteration, + so the bn can not precisely reflect the latest stats of the current model. + Here the bn stats is recomputed without change of weights, to make the + running mean and running var more precise. 
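+    The per-batch statistics are accumulated as running averages over
+    num_batches iterations; the variance is recovered from the accumulated
+    second moment via Var(x) = E(x^2) - E(x)^2.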
+ Args: + model (model): the model using to compute and update the bn stats. + data_loader (dataloader): dataloader using to provide inputs. + num_batches (int): running iterations using to compute the stats. + """ + + # Prepares all the bn layers. + bn_layers = [ + m + for m in model.modules() + if any( + ( + isinstance(m, bn_type) + for bn_type in ( + torch.nn.BatchNorm1d, + torch.nn.BatchNorm2d, + torch.nn.BatchNorm3d, + ) + ) + ) + ] + + # In order to make the running stats only reflect the current batch, the + # momentum is disabled. + # bn.running_mean = (1 - momentum) * bn.running_mean + momentum * batch_mean + # Setting the momentum to 1.0 to compute the stats without momentum. + momentum_actual = [bn.momentum for bn in bn_layers] + for bn in bn_layers: + bn.momentum = 1.0 + + # Calculates the running iterations for precise stats computation. + running_mean = [torch.zeros_like(bn.running_mean) for bn in bn_layers] + running_square_mean = [torch.zeros_like(bn.running_var) for bn in bn_layers] + + for ind, (inputs, _, _) in enumerate( + itertools.islice(data_loader, num_batches) + ): + # Forwards the model to update the bn stats. + if isinstance(inputs, (list,)): + for i in range(len(inputs)): + inputs[i] = inputs[i].float().cuda(non_blocking=True) + else: + inputs = inputs.cuda(non_blocking=True) + model(inputs) + + for i, bn in enumerate(bn_layers): + # Accumulates the bn stats. + running_mean[i] += (bn.running_mean - running_mean[i]) / (ind + 1) + # $E(x^2) = Var(x) + E(x)^2$. + cur_square_mean = bn.running_var + bn.running_mean ** 2 + running_square_mean[i] += ( + cur_square_mean - running_square_mean[i] + ) / (ind + 1) + + for i, bn in enumerate(bn_layers): + bn.running_mean = running_mean[i] + # Var(x) = $E(x^2) - E(x)^2$. + bn.running_var = running_square_mean[i] - bn.running_mean ** 2 + # Sets the precise bn stats. + bn.momentum = momentum_actual[i] diff --git a/training/detectors/utils/slowfast/utils/c2_model_loading.py b/training/detectors/utils/slowfast/utils/c2_model_loading.py new file mode 100644 index 0000000000000000000000000000000000000000..4bcc0759c484fd321917c55e9967835632c2ac54 --- /dev/null +++ b/training/detectors/utils/slowfast/utils/c2_model_loading.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Caffe2 to PyTorch checkpoint name converting utility.""" + +import re + + +def get_name_convert_func(): + """ + Get the function to convert Caffe2 layer names to PyTorch layer names. + Returns: + (func): function to convert parameter name from Caffe2 format to PyTorch + format. 
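+    The conversion applies an ordered list of regular-expression
+    substitutions, e.g. "res4_0_branch1_w" is mapped to
+    "s4.pathway0_res0.branch1.weight".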
+ """ + pairs = [ + # ------------------------------------------------------------ + # 'nonlocal_conv3_1_theta_w' -> 's3.pathway0_nonlocal3.conv_g.weight' + [ + r"^nonlocal_conv([0-9]+)_([0-9]+)_(.*)", + r"s\1.pathway0_nonlocal\2_\3", + ], + # 'theta' -> 'conv_theta' + [r"^(.*)_nonlocal([0-9]+)_(theta)(.*)", r"\1_nonlocal\2.conv_\3\4"], + # 'g' -> 'conv_g' + [r"^(.*)_nonlocal([0-9]+)_(g)(.*)", r"\1_nonlocal\2.conv_\3\4"], + # 'phi' -> 'conv_phi' + [r"^(.*)_nonlocal([0-9]+)_(phi)(.*)", r"\1_nonlocal\2.conv_\3\4"], + # 'out' -> 'conv_out' + [r"^(.*)_nonlocal([0-9]+)_(out)(.*)", r"\1_nonlocal\2.conv_\3\4"], + # 'nonlocal_conv4_5_bn_s' -> 's4.pathway0_nonlocal3.bn.weight' + [r"^(.*)_nonlocal([0-9]+)_(bn)_(.*)", r"\1_nonlocal\2.\3.\4"], + # ------------------------------------------------------------ + # 't_pool1_subsample_bn' -> 's1_fuse.conv_f2s.bn.running_mean' + [r"^t_pool1_subsample_bn_(.*)", r"s1_fuse.bn.\1"], + # 't_pool1_subsample' -> 's1_fuse.conv_f2s' + [r"^t_pool1_subsample_(.*)", r"s1_fuse.conv_f2s.\1"], + # 't_res4_5_branch2c_bn_subsample_bn_rm' -> 's4_fuse.conv_f2s.bias' + [ + r"^t_res([0-9]+)_([0-9]+)_branch2c_bn_subsample_bn_(.*)", + r"s\1_fuse.bn.\3", + ], + # 't_pool1_subsample' -> 's1_fuse.conv_f2s' + [ + r"^t_res([0-9]+)_([0-9]+)_branch2c_bn_subsample_(.*)", + r"s\1_fuse.conv_f2s.\3", + ], + # ------------------------------------------------------------ + # 'res4_4_branch_2c_bn_b' -> 's4.pathway0_res4.branch2.c_bn_b' + [ + r"^res([0-9]+)_([0-9]+)_branch([0-9]+)([a-z])_(.*)", + r"s\1.pathway0_res\2.branch\3.\4_\5", + ], + # 'res_conv1_bn_' -> 's1.pathway0_stem.bn.' + [r"^res_conv1_bn_(.*)", r"s1.pathway0_stem.bn.\1"], + # 'conv1_w_momentum' -> 's1.pathway0_stem.conv.' + [r"^conv1_(.*)", r"s1.pathway0_stem.conv.\1"], + # 'res4_0_branch1_w' -> 'S4.pathway0_res0.branch1.weight' + [ + r"^res([0-9]+)_([0-9]+)_branch([0-9]+)_(.*)", + r"s\1.pathway0_res\2.branch\3_\4", + ], + # 'res_conv1_' -> 's1.pathway0_stem.conv.' + [r"^res_conv1_(.*)", r"s1.pathway0_stem.conv.\1"], + # ------------------------------------------------------------ + # 'res4_4_branch_2c_bn_b' -> 's4.pathway0_res4.branch2.c_bn_b' + [ + r"^t_res([0-9]+)_([0-9]+)_branch([0-9]+)([a-z])_(.*)", + r"s\1.pathway1_res\2.branch\3.\4_\5", + ], + # 'res_conv1_bn_' -> 's1.pathway0_stem.bn.' + [r"^t_res_conv1_bn_(.*)", r"s1.pathway1_stem.bn.\1"], + # 'conv1_w_momentum' -> 's1.pathway0_stem.conv.' + [r"^t_conv1_(.*)", r"s1.pathway1_stem.conv.\1"], + # 'res4_0_branch1_w' -> 'S4.pathway0_res0.branch1.weight' + [ + r"^t_res([0-9]+)_([0-9]+)_branch([0-9]+)_(.*)", + r"s\1.pathway1_res\2.branch\3_\4", + ], + # 'res_conv1_' -> 's1.pathway0_stem.conv.' + [r"^t_res_conv1_(.*)", r"s1.pathway1_stem.conv.\1"], + # ------------------------------------------------------------ + # pred_ -> head.projection. + [r"pred_(.*)", r"head.projection.\1"], + # '.bn_b' -> '.weight' + [r"(.*)bn.b\Z", r"\1bn.bias"], + # '.bn_s' -> '.weight' + [r"(.*)bn.s\Z", r"\1bn.weight"], + # '_bn_rm' -> '.running_mean' + [r"(.*)bn.rm\Z", r"\1bn.running_mean"], + # '_bn_riv' -> '.running_var' + [r"(.*)bn.riv\Z", r"\1bn.running_var"], + # '_b' -> '.bias' + [r"(.*)[\._]b\Z", r"\1.bias"], + # '_w' -> '.weight' + [r"(.*)[\._]w\Z", r"\1.weight"], + ] + + def convert_caffe2_name_to_pytorch(caffe2_layer_name): + """ + Convert the caffe2_layer_name to pytorch format by apply the list of + regular expressions. + Args: + caffe2_layer_name (str): caffe2 layer name. + Returns: + (str): pytorch layer name. 
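+        The substitutions in `pairs` are applied sequentially, so the
+        order of the rules matters.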
+ """ + for source, dest in pairs: + caffe2_layer_name = re.sub(source, dest, caffe2_layer_name) + return caffe2_layer_name + + return convert_caffe2_name_to_pytorch diff --git a/training/detectors/utils/slowfast/utils/checkpoint.py b/training/detectors/utils/slowfast/utils/checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..05d5ac4624feecbbc1f868682f8ed921ac5fef2c --- /dev/null +++ b/training/detectors/utils/slowfast/utils/checkpoint.py @@ -0,0 +1,530 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Functions that handle saving and loading of checkpoints.""" + +import copy +import numpy as np +import os +import pickle +from collections import OrderedDict +import torch +from fvcore.common.file_io import PathManager + +import slowfast.utils.distributed as du +import slowfast.utils.logging as logging +from slowfast.utils.c2_model_loading import get_name_convert_func + +logger = logging.get_logger(__name__) + + +def make_checkpoint_dir(path_to_job): + """ + Creates the checkpoint directory (if not present already). + Args: + path_to_job (string): the path to the folder of the current job. + """ + checkpoint_dir = os.path.join(path_to_job, "checkpoints") + # Create the checkpoint dir from the master process + if du.is_master_proc() and not PathManager.exists(checkpoint_dir): + try: + PathManager.mkdirs(checkpoint_dir) + except Exception: + pass + return checkpoint_dir + + +def get_checkpoint_dir(path_to_job): + """ + Get path for storing checkpoints. + Args: + path_to_job (string): the path to the folder of the current job. + """ + return os.path.join(path_to_job, "checkpoints") + + +def get_path_to_checkpoint(path_to_job, epoch): + """ + Get the full path to a checkpoint file. + Args: + path_to_job (string): the path to the folder of the current job. + epoch (int): the number of epoch for the checkpoint. + """ + name = "checkpoint_epoch_{:07d}.pyth".format(epoch) + return os.path.join(get_checkpoint_dir(path_to_job), name) + + +def get_last_checkpoint(path_to_job): + """ + Get the last checkpoint from the checkpointing folder. + Args: + path_to_job (string): the path to the folder of the current job. + """ + + d = get_checkpoint_dir(path_to_job) + names = PathManager.ls(d) if PathManager.exists(d) else [] + names = [f for f in names if "checkpoint" in f] + assert len(names), "No checkpoints found in '{}'.".format(d) + # Sort the checkpoints by epoch. + name = sorted(names)[-1] + return os.path.join(d, name) + + +def has_checkpoint(path_to_job): + """ + Determines if the given directory contains a checkpoint. + Args: + path_to_job (string): the path to the folder of the current job. + """ + d = get_checkpoint_dir(path_to_job) + files = PathManager.ls(d) if PathManager.exists(d) else [] + return any("checkpoint" in f for f in files) + + +def is_checkpoint_epoch(cfg, cur_epoch, multigrid_schedule=None): + """ + Determine if a checkpoint should be saved on current epoch. + Args: + cfg (CfgNode): configs to save. + cur_epoch (int): current number of epoch of the model. + multigrid_schedule (List): schedule for multigrid training. 
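+    Returns:
+        (bool): True if a checkpoint should be saved after the current epoch.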
+ """ + if cur_epoch + 1 == cfg.SOLVER.MAX_EPOCH: + return True + if multigrid_schedule is not None: + prev_epoch = 0 + for s in multigrid_schedule: + if cur_epoch < s[-1]: + period = max( + (s[-1] - prev_epoch) // cfg.MULTIGRID.EVAL_FREQ + 1, 1 + ) + return (s[-1] - 1 - cur_epoch) % period == 0 + prev_epoch = s[-1] + + return (cur_epoch + 1) % cfg.TRAIN.CHECKPOINT_PERIOD == 0 + + +def is_checkpoint_iter(cfg, cur_iter): + """ + Determine if a checkpoint should be saved on current iter. + Args: + cfg (CfgNode): configs to save. + cur_epoch (int): current number of epoch of the model. + multigrid_schedule (List): schedule for multigrid training. + """ + + return (cur_iter+1) % cfg.TRAIN.CHECKPOINT_PERIOD_BY_ITER == 0 + + + +def save_checkpoint_by_iter(path_to_job, model, optimizer, epoch,global_step,cfg): + """ + Save a checkpoint. + Args: + model (model): model to save the weight to the checkpoint. + optimizer (optim): optimizer to save the historical state. + epoch (int): current number of epoch of the model. + cfg (CfgNode): configs to save. + """ + # Save checkpoints only from the master process. + if not du.is_master_proc(cfg.NUM_GPUS * cfg.NUM_SHARDS): + return + # Ensure that the checkpoint dir exists. + PathManager.mkdirs(get_checkpoint_dir(path_to_job)) + # Omit the DDP wrapper in the multi-gpu setting. + sd = model.module.state_dict() if cfg.NUM_GPUS > 1 else model.state_dict() + normalized_sd = sub_to_normal_bn(sd) + + # Record the state. + checkpoint = { + "epoch": epoch, + "model_state": normalized_sd, + "optimizer_state": optimizer.state_dict(), + "global_step": global_step, + "cfg": cfg.dump(), + } + # Write the checkpoint. + path_to_checkpoint = get_path_to_checkpoint(path_to_job,global_step+1) + with PathManager.open(path_to_checkpoint, "wb") as f: + torch.save(checkpoint, f) + return path_to_checkpoint + +def save_checkpoint(path_to_job, model, optimizer, epoch, cfg): + """ + Save a checkpoint. + Args: + model (model): model to save the weight to the checkpoint. + optimizer (optim): optimizer to save the historical state. + epoch (int): current number of epoch of the model. + cfg (CfgNode): configs to save. + """ + # Save checkpoints only from the master process. + if not du.is_master_proc(cfg.NUM_GPUS * cfg.NUM_SHARDS): + return + # Ensure that the checkpoint dir exists. + PathManager.mkdirs(get_checkpoint_dir(path_to_job)) + # Omit the DDP wrapper in the multi-gpu setting. + sd = model.module.state_dict() if cfg.NUM_GPUS > 1 else model.state_dict() + normalized_sd = sub_to_normal_bn(sd) + + # Record the state. + checkpoint = { + "epoch": epoch, + "model_state": normalized_sd, + "optimizer_state": optimizer.state_dict(), + "cfg": cfg.dump(), + } + # Write the checkpoint. + path_to_checkpoint = get_path_to_checkpoint(path_to_job, epoch + 1) + with PathManager.open(path_to_checkpoint, "wb") as f: + torch.save(checkpoint, f) + return path_to_checkpoint + + +def inflate_weight(state_dict_2d, state_dict_3d): + """ + Inflate 2D model weights in state_dict_2d to the 3D model weights in + state_dict_3d. The details can be found in: + Joao Carreira, and Andrew Zisserman. + "Quo vadis, action recognition? a new model and the kinetics dataset." + Args: + state_dict_2d (OrderedDict): a dict of parameters from a 2D model. + state_dict_3d (OrderedDict): a dict of parameters from a 3D model. + Returns: + state_dict_inflated (OrderedDict): a dict of inflated parameters. 
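+    Each 2D convolution kernel is repeated along the new temporal dimension
+    and divided by the temporal size, so the inflated 3D filter gives the
+    same response as the original 2D filter on a temporally constant input.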
+ """ + state_dict_inflated = OrderedDict() + for k, v2d in state_dict_2d.items(): + assert k in state_dict_3d.keys() + v3d = state_dict_3d[k] + # Inflate the weight of 2D conv to 3D conv. + if len(v2d.shape) == 4 and len(v3d.shape) == 5: + logger.info( + "Inflate {}: {} -> {}: {}".format(k, v2d.shape, k, v3d.shape) + ) + # Dimension need to be match. + assert v2d.shape[-2:] == v3d.shape[-2:] + assert v2d.shape[:2] == v3d.shape[:2] + v3d = ( + v2d.unsqueeze(2).repeat(1, 1, v3d.shape[2], 1, 1) / v3d.shape[2] + ) + elif v2d.shape == v3d.shape: + v3d = v2d + else: + logger.info( + "Unexpected {}: {} -|> {}: {}".format( + k, v2d.shape, k, v3d.shape + ) + ) + state_dict_inflated[k] = v3d.clone() + return state_dict_inflated + + +def load_checkpoint( + path_to_checkpoint, + model, + data_parallel=True, + optimizer=None, + inflation=False, + convert_from_caffe2=False, +): + """ + Load the checkpoint from the given file. If inflation is True, inflate the + 2D Conv weights from the checkpoint to 3D Conv. + Args: + path_to_checkpoint (string): path to the checkpoint to load. + model (model): model to load the weights from the checkpoint. + data_parallel (bool): if true, model is wrapped by + torch.nn.parallel.DistributedDataParallel. + optimizer (optim): optimizer to load the historical state. + inflation (bool): if True, inflate the weights from the checkpoint. + convert_from_caffe2 (bool): if True, load the model from caffe2 and + convert it to pytorch. + Returns: + (int): the number of training epoch of the checkpoint. + """ + assert PathManager.exists( + path_to_checkpoint + ), "Checkpoint '{}' not found".format(path_to_checkpoint) + # Account for the DDP wrapper in the multi-gpu setting. + ms = model.module if data_parallel else model + if convert_from_caffe2: + with PathManager.open(path_to_checkpoint, "rb") as f: + caffe2_checkpoint = pickle.load(f, encoding="latin1") + state_dict = OrderedDict() + name_convert_func = get_name_convert_func() + for key in caffe2_checkpoint["blobs"].keys(): + converted_key = name_convert_func(key) + converted_key = c2_normal_to_sub_bn(converted_key, ms.state_dict()) + if converted_key in ms.state_dict(): + c2_blob_shape = caffe2_checkpoint["blobs"][key].shape + model_blob_shape = ms.state_dict()[converted_key].shape + # Load BN stats to Sub-BN. + if ( + len(model_blob_shape) == 1 + and len(c2_blob_shape) == 1 + and model_blob_shape[0] > c2_blob_shape[0] + and model_blob_shape[0] % c2_blob_shape[0] == 0 + ): + caffe2_checkpoint["blobs"][key] = np.concatenate( + [caffe2_checkpoint["blobs"][key]] + * (model_blob_shape[0] // c2_blob_shape[0]) + ) + c2_blob_shape = caffe2_checkpoint["blobs"][key].shape + + if c2_blob_shape == tuple(model_blob_shape): + state_dict[converted_key] = torch.tensor( + caffe2_checkpoint["blobs"][key] + ).clone() + logger.info( + "{}: {} => {}: {}".format( + key, + c2_blob_shape, + converted_key, + tuple(model_blob_shape), + ) + ) + else: + logger.warn( + "!! {}: {} does not match {}: {}".format( + key, + c2_blob_shape, + converted_key, + tuple(model_blob_shape), + ) + ) + else: + if not any( + prefix in key for prefix in ["momentum", "lr", "model_iter"] + ): + logger.warn( + "!! {}: can not be converted, got {}".format( + key, converted_key + ) + ) + ms.load_state_dict(state_dict, strict=False) + epoch = -1 + global_step=-1 + else: + # Load the checkpoint on CPU to avoid GPU mem spike. 
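+        # The loaded state dict is first converted from normal BN naming to
+        # Sub-BN naming (normal_to_sub_bn) and, if requested, 2D weights are
+        # inflated to 3D before being loaded into the model.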
+ with PathManager.open(path_to_checkpoint, "rb") as f: + checkpoint = torch.load(f, map_location="cpu") + model_state_dict_3d = ( + model.module.state_dict() if data_parallel else model.state_dict() + ) + checkpoint["model_state"] = normal_to_sub_bn( + checkpoint["model_state"], model_state_dict_3d + ) + if inflation: + # Try to inflate the model. + inflated_model_dict = inflate_weight( + checkpoint["model_state"], model_state_dict_3d + ) + ms.load_state_dict(inflated_model_dict, strict=False) + else: + ms.load_state_dict(checkpoint["model_state"]) + # Load the optimizer state (commonly not done when fine-tuning) + if optimizer: + optimizer.load_state_dict(checkpoint["optimizer_state"]) + if "epoch" in checkpoint.keys(): + epoch = checkpoint["epoch"] + else: + epoch = -1 + if "global_step" in checkpoint.keys(): + global_step=checkpoint["global_step"] + else: + global_step=-1 + return epoch,global_step + + +def sub_to_normal_bn(sd): + """ + Convert the Sub-BN paprameters to normal BN parameters in a state dict. + There are two copies of BN layers in a Sub-BN implementation: `bn.bn` and + `bn.split_bn`. `bn.split_bn` is used during training and + "compute_precise_bn". Before saving or evaluation, its stats are copied to + `bn.bn`. We rename `bn.bn` to `bn` and store it to be consistent with normal + BN layers. + Args: + sd (OrderedDict): a dict of parameters whitch might contain Sub-BN + parameters. + Returns: + new_sd (OrderedDict): a dict with Sub-BN parameters reshaped to + normal parameters. + """ + new_sd = copy.deepcopy(sd) + modifications = [ + ("bn.bn.running_mean", "bn.running_mean"), + ("bn.bn.running_var", "bn.running_var"), + ("bn.split_bn.num_batches_tracked", "bn.num_batches_tracked"), + ] + to_remove = ["bn.bn.", ".split_bn."] + for key in sd: + for before, after in modifications: + if key.endswith(before): + new_key = key.split(before)[0] + after + new_sd[new_key] = new_sd.pop(key) + + for rm in to_remove: + if rm in key and key in new_sd: + del new_sd[key] + + for key in new_sd: + if key.endswith("bn.weight") or key.endswith("bn.bias"): + if len(new_sd[key].size()) == 4: + assert all(d == 1 for d in new_sd[key].size()[1:]) + new_sd[key] = new_sd[key][:, 0, 0, 0] + + return new_sd + + +def c2_normal_to_sub_bn(key, model_keys): + """ + Convert BN parameters to Sub-BN parameters if model contains Sub-BNs. + Args: + key (OrderedDict): source dict of parameters. + mdoel_key (OrderedDict): target dict of parameters. + Returns: + new_sd (OrderedDict): converted dict of parameters. + """ + if "bn.running_" in key: + if key in model_keys: + return key + + new_key = key.replace("bn.running_", "bn.split_bn.running_") + if new_key in model_keys: + return new_key + else: + return key + + +def normal_to_sub_bn(checkpoint_sd, model_sd): + """ + Convert BN parameters to Sub-BN parameters if model contains Sub-BNs. + Args: + checkpoint_sd (OrderedDict): source dict of parameters. + model_sd (OrderedDict): target dict of parameters. + Returns: + new_sd (OrderedDict): converted dict of parameters. + """ + for key in model_sd: + if key not in checkpoint_sd: + if "bn.split_bn." 
in key: + load_key = key.replace("bn.split_bn.", "bn.") + bn_key = key.replace("bn.split_bn.", "bn.bn.") + checkpoint_sd[key] = checkpoint_sd.pop(load_key) + checkpoint_sd[bn_key] = checkpoint_sd[key] + + for key in model_sd: + if key in checkpoint_sd: + model_blob_shape = model_sd[key].shape + c2_blob_shape = checkpoint_sd[key].shape + + if ( + len(model_blob_shape) == 1 + and len(c2_blob_shape) == 1 + and model_blob_shape[0] > c2_blob_shape[0] + and model_blob_shape[0] % c2_blob_shape[0] == 0 + ): + before_shape = checkpoint_sd[key].shape + checkpoint_sd[key] = torch.cat( + [checkpoint_sd[key]] + * (model_blob_shape[0] // c2_blob_shape[0]) + ) + logger.info( + "{} {} -> {}".format( + key, before_shape, checkpoint_sd[key].shape + ) + ) + return checkpoint_sd + + +def load_test_checkpoint(cfg, model): + """ + Loading checkpoint logic for testing. + """ + # Load a checkpoint to test if applicable. + if cfg.TEST.CHECKPOINT_FILE_PATH != "": + # If no checkpoint found in MODEL_VIS.CHECKPOINT_FILE_PATH or in the current + # checkpoint folder, try to load checkpoint from + # TEST.CHECKPOINT_FILE_PATH and test it. + load_checkpoint( + cfg.TEST.CHECKPOINT_FILE_PATH, + model, + cfg.NUM_GPUS > 1, + None, + inflation=False, + convert_from_caffe2=cfg.TEST.CHECKPOINT_TYPE == "caffe2", + ) + elif has_checkpoint(cfg.OUTPUT_DIR): + last_checkpoint = get_last_checkpoint(cfg.OUTPUT_DIR) + load_checkpoint(last_checkpoint, model, cfg.NUM_GPUS > 1) + elif cfg.TRAIN.CHECKPOINT_FILE_PATH != "": + # If no checkpoint found in TEST.CHECKPOINT_FILE_PATH or in the current + # checkpoint folder, try to load checkpoint from + # TRAIN.CHECKPOINT_FILE_PATH and test it. + load_checkpoint( + cfg.TRAIN.CHECKPOINT_FILE_PATH, + model, + cfg.NUM_GPUS > 1, + None, + inflation=False, + convert_from_caffe2=cfg.TRAIN.CHECKPOINT_TYPE == "caffe2", + ) + else: + logger.info( + "Unknown way of loading checkpoint. Using with random initialization, only for debugging." + ) + + +def load_train_checkpoint(cfg, model, optimizer): + """ + Loading checkpoint logic for training. 
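+    Resumes from the last checkpoint in cfg.OUTPUT_DIR when AUTO_RESUME is
+    enabled, otherwise falls back to cfg.TRAIN.CHECKPOINT_FILE_PATH.
+    Returns:
+        start_epoch (int) and global_step (int) to resume training from.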
+ """ + if cfg.TRAIN.AUTO_RESUME and has_checkpoint(cfg.OUTPUT_DIR): + last_checkpoint = get_last_checkpoint(cfg.OUTPUT_DIR) + logger.info("Load from last checkpoint, {}.".format(last_checkpoint)) + checkpoint_epoch,global_step = load_checkpoint( + last_checkpoint, model, cfg.NUM_GPUS > 1, optimizer + ) + start_epoch = checkpoint_epoch + 1 + global_step = global_step + 1 + elif cfg.TRAIN.CHECKPOINT_FILE_PATH != "": + if cfg.TRAIN.CHECKPOINT_TYPE=="backbone": + logger.info("Load backbone from given checkpoint file.") + load_backbone(model,cfg.TRAIN.CHECKPOINT_FILE_PATH) + start_epoch = 0 + global_step = 0 + else: + logger.info("Load from given checkpoint file.") + checkpoint_epoch, global_step = load_checkpoint( + cfg.TRAIN.CHECKPOINT_FILE_PATH, + model, + cfg.NUM_GPUS > 1, + optimizer, + inflation=cfg.TRAIN.CHECKPOINT_INFLATE, + convert_from_caffe2=cfg.TRAIN.CHECKPOINT_TYPE == "caffe2", + ) + start_epoch = checkpoint_epoch + 1 + global_step = global_step + 1 + else: + start_epoch = 0 + global_step = 0 + + return start_epoch, global_step + + + +def load_backbone(model,file): + current_state=model.state_dict() + checkpoint=torch.load(file) + + for key in checkpoint: + if key in current_state: + assert current_state[key].shape==checkpoint[key].shape + current_state[key]=checkpoint[key] + model.load_state_dict(current_state) + + return model + + diff --git a/training/detectors/utils/slowfast/utils/distributed.py b/training/detectors/utils/slowfast/utils/distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..bfbed8e8a4af5fc4b38c1558616fd3f640b587c0 --- /dev/null +++ b/training/detectors/utils/slowfast/utils/distributed.py @@ -0,0 +1,299 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Distributed helpers.""" + +import functools +import logging +import pickle +import torch +import torch.distributed as dist + +_LOCAL_PROCESS_GROUP = None + + +def all_gather(tensors): + """ + All gathers the provided tensors from all processes across machines. + Args: + tensors (list): tensors to perform all gather across all processes in + all machines. + """ + + gather_list = [] + output_tensor = [] + world_size = dist.get_world_size() + for tensor in tensors: + tensor_placeholder = [ + torch.ones_like(tensor) for _ in range(world_size) + ] + dist.all_gather(tensor_placeholder, tensor, async_op=False) + gather_list.append(tensor_placeholder) + for gathered_tensor in gather_list: + output_tensor.append(torch.cat(gathered_tensor, dim=0)) + return output_tensor + + +def all_reduce(tensors, average=True): + """ + All reduce the provided tensors from all processes across machines. + Args: + tensors (list): tensors to perform all reduce across all processes in + all machines. + average (bool): scales the reduced tensor by the number of overall + processes across all machines. + """ + + for tensor in tensors: + dist.all_reduce(tensor, async_op=False) + if average: + world_size = dist.get_world_size() + for tensor in tensors: + tensor.mul_(1.0 / world_size) + return tensors + + +def init_process_group( + local_rank, + local_world_size, + shard_id, + num_shards, + init_method, + dist_backend="nccl", +): + """ + Initializes the default process group. + Args: + local_rank (int): the rank on the current local machine. + local_world_size (int): the world size (number of processes running) on + the current local machine. + shard_id (int): the shard index (machine rank) of the current machine. 
+ num_shards (int): number of shards for distributed training. + init_method (string): supporting three different methods for + initializing process groups: + "file": use shared file system to initialize the groups across + different processes. + "tcp": use tcp address to initialize the groups across different + dist_backend (string): backend to use for distributed training. Options + includes gloo, mpi and nccl, the details can be found here: + https://pytorch.org/docs/stable/distributed.html + """ + # Sets the GPU to use. + torch.cuda.set_device(local_rank) + # Initialize the process group. + proc_rank = local_rank + shard_id * local_world_size + world_size = local_world_size * num_shards + dist.init_process_group( + backend=dist_backend, + init_method=init_method, + world_size=world_size, + rank=proc_rank, + ) + + +def is_master_proc(num_gpus=8): + """ + Determines if the current process is the master process. + """ + if torch.distributed.is_initialized(): + return dist.get_rank() % num_gpus == 0 + else: + return True + + +def get_world_size(): + """ + Get the size of the world. + """ + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + """ + Get the rank of the current process. + """ + if not dist.is_available(): + return 0 + if not dist.is_initialized(): + return 0 + return dist.get_rank() + + +def synchronize(): + """ + Helper function to synchronize (barrier) among all processes when + using distributed training + """ + if not dist.is_available(): + return + if not dist.is_initialized(): + return + world_size = dist.get_world_size() + if world_size == 1: + return + dist.barrier() + + +@functools.lru_cache() +def _get_global_gloo_group(): + """ + Return a process group based on gloo backend, containing all the ranks + The result is cached. + Returns: + (group): pytorch dist group. + """ + if dist.get_backend() == "nccl": + return dist.new_group(backend="gloo") + else: + return dist.group.WORLD + + +def _serialize_to_tensor(data, group): + """ + Seriialize the tensor to ByteTensor. Note that only `gloo` and `nccl` + backend is supported. + Args: + data (data): data to be serialized. + group (group): pytorch dist group. + Returns: + tensor (ByteTensor): tensor that serialized. + """ + + backend = dist.get_backend(group) + assert backend in ["gloo", "nccl"] + device = torch.device("cpu" if backend == "gloo" else "cuda") + + buffer = pickle.dumps(data) + if len(buffer) > 1024 ** 3: + logger = logging.getLogger(__name__) + logger.warning( + "Rank {} trying to all-gather {:.2f} GB of data on device {}".format( + get_rank(), len(buffer) / (1024 ** 3), device + ) + ) + storage = torch.ByteStorage.from_buffer(buffer) + tensor = torch.ByteTensor(storage).to(device=device) + return tensor + + +def _pad_to_largest_tensor(tensor, group): + """ + Padding all the tensors from different GPUs to the largest ones. + Args: + tensor (tensor): tensor to pad. + group (group): pytorch dist group. + Returns: + list[int]: size of the tensor, on each rank + Tensor: padded tensor that has the max size + """ + world_size = dist.get_world_size(group=group) + assert ( + world_size >= 1 + ), "comm.gather/all_gather must be called from ranks within the given group!" 
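+    # Exchange the number of elements held by each rank so every process
+    # knows the maximum size it must pad its tensor to before all_gather.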
+ local_size = torch.tensor( + [tensor.numel()], dtype=torch.int64, device=tensor.device + ) + size_list = [ + torch.zeros([1], dtype=torch.int64, device=tensor.device) + for _ in range(world_size) + ] + dist.all_gather(size_list, local_size, group=group) + size_list = [int(size.item()) for size in size_list] + + max_size = max(size_list) + + # we pad the tensor because torch all_gather does not support + # gathering tensors of different shapes + if local_size != max_size: + padding = torch.zeros( + (max_size - local_size,), dtype=torch.uint8, device=tensor.device + ) + tensor = torch.cat((tensor, padding), dim=0) + return size_list, tensor + + +def all_gather_unaligned(data, group=None): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors). + + Args: + data: any picklable object + group: a torch process group. By default, will use a group which + contains all ranks on gloo backend. + + Returns: + list[data]: list of data gathered from each rank + """ + if get_world_size() == 1: + return [data] + if group is None: + group = _get_global_gloo_group() + if dist.get_world_size(group) == 1: + return [data] + + tensor = _serialize_to_tensor(data, group) + + size_list, tensor = _pad_to_largest_tensor(tensor, group) + max_size = max(size_list) + + # receiving Tensor from all ranks + tensor_list = [ + torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) + for _ in size_list + ] + dist.all_gather(tensor_list, tensor, group=group) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + + return data_list + + +def init_distributed_training(cfg): + """ + Initialize variables needed for distributed training. + """ + if cfg.NUM_GPUS <= 1: + return + num_gpus_per_machine = cfg.NUM_GPUS + num_machines = dist.get_world_size() // num_gpus_per_machine + for i in range(num_machines): + ranks_on_i = list( + range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine) + ) + pg = dist.new_group(ranks_on_i) + if i == cfg.SHARD_ID: + global _LOCAL_PROCESS_GROUP + _LOCAL_PROCESS_GROUP = pg + + +def get_local_size() -> int: + """ + Returns: + The size of the per-machine process group, + i.e. the number of processes per machine. + """ + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + return dist.get_world_size(group=_LOCAL_PROCESS_GROUP) + + +def get_local_rank() -> int: + """ + Returns: + The rank of the current process within the local (per-machine) process group. + """ + if not dist.is_available(): + return 0 + if not dist.is_initialized(): + return 0 + assert _LOCAL_PROCESS_GROUP is not None + return dist.get_rank(group=_LOCAL_PROCESS_GROUP) diff --git a/training/detectors/utils/slowfast/utils/env.py b/training/detectors/utils/slowfast/utils/env.py new file mode 100644 index 0000000000000000000000000000000000000000..2554915089a6e20c9ce58bba7fa59136ed65c887 --- /dev/null +++ b/training/detectors/utils/slowfast/utils/env.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
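A small usage sketch for `all_gather_unaligned`: because the payload is pickled, any Python object can be exchanged, which is convenient for per-rank bookkeeping dicts. The module path and payload below are assumptions for illustration:

import slowfast.utils.distributed as du  # module path assumed

# Each rank builds an arbitrary picklable payload (here a plain dict).
local_stats = {"rank": du.get_rank(), "num_samples": 128}

# Returns a list with one entry per rank; with a single process it is just [local_stats].
all_stats = du.all_gather_unaligned(local_stats)

if du.is_master_proc():
    total = sum(s["num_samples"] for s in all_stats)
    print(f"{total} samples gathered from {len(all_stats)} rank(s)")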
+ +"""Set up Environment.""" + +import slowfast.utils.logging as logging + +_ENV_SETUP_DONE = False + + +def setup_environment(): + global _ENV_SETUP_DONE + if _ENV_SETUP_DONE: + return + _ENV_SETUP_DONE = True diff --git a/training/detectors/utils/slowfast/utils/logging.py b/training/detectors/utils/slowfast/utils/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..f2763b3000c9be4a5499b310a3bc052fd5472d50 --- /dev/null +++ b/training/detectors/utils/slowfast/utils/logging.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Logging.""" + +import builtins +import decimal +import functools +import logging +import os +import sys +import simplejson +from fvcore.common.file_io import PathManager + +import slowfast.utils.distributed as du + + +def _suppress_print(): + """ + Suppresses printing from the current process. + """ + + def print_pass(*objects, sep=" ", end="\n", file=sys.stdout, flush=False): + pass + + builtins.print = print_pass + + +@functools.lru_cache(maxsize=None) +def _cached_log_stream(filename): + return PathManager.open(filename, "a") + + +def setup_logging(output_dir=None): + """ + Sets up the logging for multiple processes. Only enable the logging for the + master process, and suppress logging for the non-master processes. + """ + # Set up logging format. + _FORMAT = "[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s" + + if du.is_master_proc(): + # Enable logging for the master process. + logging.root.handlers = [] + else: + # Suppress logging for non-master processes. + _suppress_print() + + logger = logging.getLogger() + logger.setLevel(logging.DEBUG) + logger.propagate = False + plain_formatter = logging.Formatter( + "[%(asctime)s][%(levelname)s] %(name)s: %(lineno)4d: %(message)s", + datefmt="%m/%d %H:%M:%S", + ) + + if du.is_master_proc(): + ch = logging.StreamHandler(stream=sys.stdout) + ch.setLevel(logging.DEBUG) + ch.setFormatter(plain_formatter) + logger.addHandler(ch) + + if output_dir is not None and du.is_master_proc(du.get_world_size()): + filename = os.path.join(output_dir, "stdout.log") + fh = logging.StreamHandler(_cached_log_stream(filename)) + fh.setLevel(logging.DEBUG) + fh.setFormatter(plain_formatter) + logger.addHandler(fh) + + +def get_logger(name): + """ + Retrieve the logger with the specified name or, if name is None, return a + logger which is the root logger of the hierarchy. + Args: + name (string): name of the logger. + """ + return logging.getLogger(name) + + +def log_json_stats(stats): + """ + Logs json stats. + Args: + stats (dict): a dictionary of statistical information to log. + """ + stats = { + k: decimal.Decimal("{:.6f}".format(v)) if isinstance(v, float) else v + for k, v in stats.items() + } + json_stats = simplejson.dumps(stats, sort_keys=True, use_decimal=True) + logger = get_logger(__name__) + logger.info("json_stats: {:s}".format(json_stats)) diff --git a/training/detectors/utils/slowfast/utils/lr_policy.py b/training/detectors/utils/slowfast/utils/lr_policy.py new file mode 100644 index 0000000000000000000000000000000000000000..4c67f8e5d9d6d576928986521dbe2eac8b1ca7e5 --- /dev/null +++ b/training/detectors/utils/slowfast/utils/lr_policy.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
+ +"""Learning rate policy.""" + +import math + + +def get_lr_at_epoch(cfg, cur_epoch): + """ + Retrieve the learning rate of the current epoch with the option to perform + warm up in the beginning of the training stage. + Args: + cfg (CfgNode): configs. Details can be found in + slowfast/config/defaults.py + cur_epoch (float): the number of epoch of the current training stage. + """ + lr = get_lr_func(cfg.SOLVER.LR_POLICY)(cfg, cur_epoch) + # Perform warm up. + if cur_epoch < cfg.SOLVER.WARMUP_EPOCHS: + lr_start = cfg.SOLVER.WARMUP_START_LR + lr_end = get_lr_func(cfg.SOLVER.LR_POLICY)( + cfg, cfg.SOLVER.WARMUP_EPOCHS + ) + alpha = (lr_end - lr_start) / cfg.SOLVER.WARMUP_EPOCHS + lr = cur_epoch * alpha + lr_start + return lr + +def get_lr_at_iter(cfg,cur_iter): + """LR schedule that should yield 76% converged accuracy with batch size 256""" + start_step = cfg.SOLVER.TOTAL_STEP- cfg.SOLVER.LR_STEP + duration_step = cfg.SOLVER.LR_STEP + base_lr=float(cfg.SOLVER.BASE_LR) + if cur_iter <= start_step: + return base_lr + else: + this_step = cur_iter - start_step + lr = base_lr * ((this_step / duration_step) ** 2.0) + return lr + + +def lr_func_cosine(cfg, cur_epoch): + """ + Retrieve the learning rate to specified values at specified epoch with the + cosine learning rate schedule. Details can be found in: + Ilya Loshchilov, and Frank Hutter + SGDR: Stochastic Gradient Descent With Warm Restarts. + Args: + cfg (CfgNode): configs. Details can be found in + slowfast/config/defaults.py + cur_epoch (float): the number of epoch of the current training stage. + """ + return ( + cfg.SOLVER.BASE_LR + * (math.cos(math.pi * cur_epoch / cfg.SOLVER.MAX_EPOCH) + 1.0) + * 0.5 + ) + + +def lr_func_steps_with_relative_lrs(cfg, cur_epoch): + """ + Retrieve the learning rate to specified values at specified epoch with the + steps with relative learning rate schedule. + Args: + cfg (CfgNode): configs. Details can be found in + slowfast/config/defaults.py + cur_epoch (float): the number of epoch of the current training stage. + """ + ind = get_step_index(cfg, cur_epoch) + return cfg.SOLVER.LRS[ind] * cfg.SOLVER.BASE_LR + + +def get_step_index(cfg, cur_epoch): + """ + Retrieves the lr step index for the given epoch. + Args: + cfg (CfgNode): configs. Details can be found in + slowfast/config/defaults.py + cur_epoch (float): the number of epoch of the current training stage. + """ + steps = cfg.SOLVER.STEPS + [cfg.SOLVER.MAX_EPOCH] + for ind, step in enumerate(steps): # NoQA + if cur_epoch < step: + break + return ind - 1 + + +def get_lr_func(lr_policy): + """ + Given the configs, retrieve the specified lr policy function. + Args: + lr_policy (string): the learning rate policy to use for the job. + """ + policy = "lr_func_" + lr_policy + if policy not in globals(): + raise NotImplementedError("Unknown LR policy: {}".format(lr_policy)) + else: + return globals()[policy] diff --git a/training/detectors/utils/slowfast/utils/meters.py b/training/detectors/utils/slowfast/utils/meters.py new file mode 100644 index 0000000000000000000000000000000000000000..2c4e9582a2bb5f6685987ce1f0ce391ac3950419 --- /dev/null +++ b/training/detectors/utils/slowfast/utils/meters.py @@ -0,0 +1,841 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
+ +"""Meters.""" + +import datetime +import numpy as np +import os +from collections import defaultdict, deque +import torch +from fvcore.common.timer import Timer +from sklearn.metrics import average_precision_score + +import slowfast.datasets.ava_helper as ava_helper +import slowfast.utils.logging as logging +import slowfast.utils.metrics as metrics +import slowfast.utils.misc as misc +from slowfast.utils.ava_eval_helper import ( + evaluate_ava, + read_csv, + read_exclusions, + read_labelmap, +) + +logger = logging.get_logger(__name__) + + +def get_ava_mini_groundtruth(full_groundtruth): + """ + Get the groundtruth annotations corresponding the "subset" of AVA val set. + We define the subset to be the frames such that (second % 4 == 0). + We optionally use subset for faster evaluation during training + (in order to track training progress). + Args: + full_groundtruth(dict): list of groundtruth. + """ + ret = [defaultdict(list), defaultdict(list), defaultdict(list)] + + for i in range(3): + for key in full_groundtruth[i].keys(): + if int(key.split(",")[1]) % 4 == 0: + ret[i][key] = full_groundtruth[i][key] + return ret + + +class AVAMeter(object): + """ + Measure the AVA train, val, and test stats. + """ + + def __init__(self, overall_iters, cfg, mode): + """ + overall_iters (int): the overall number of iterations of one epoch. + cfg (CfgNode): configs. + mode (str): `train`, `val`, or `test` mode. + """ + self.cfg = cfg + self.lr = None + self.loss = ScalarMeter(cfg.LOG_PERIOD) + self.full_ava_test = cfg.AVA.FULL_TEST_ON_VAL + self.mode = mode + self.iter_timer = Timer() + self.all_preds = [] + self.all_ori_boxes = [] + self.all_metadata = [] + self.overall_iters = overall_iters + self.excluded_keys = read_exclusions( + os.path.join(cfg.AVA.ANNOTATION_DIR, cfg.AVA.EXCLUSION_FILE) + ) + self.categories, self.class_whitelist = read_labelmap( + os.path.join(cfg.AVA.ANNOTATION_DIR, cfg.AVA.LABEL_MAP_FILE) + ) + gt_filename = os.path.join( + cfg.AVA.ANNOTATION_DIR, cfg.AVA.GROUNDTRUTH_FILE + ) + self.full_groundtruth = read_csv(gt_filename, self.class_whitelist) + self.mini_groundtruth = get_ava_mini_groundtruth(self.full_groundtruth) + + _, self.video_idx_to_name = ava_helper.load_image_lists( + cfg, mode == "train" + ) + + def log_iter_stats(self, cur_epoch, cur_iter): + """ + Log the stats. + Args: + cur_epoch (int): the current epoch. + cur_iter (int): the current iteration. + """ + + if (cur_iter + 1) % self.cfg.LOG_PERIOD != 0: + return + + eta_sec = self.iter_timer.seconds() * (self.overall_iters - cur_iter) + eta = str(datetime.timedelta(seconds=int(eta_sec))) + if self.mode == "train": + stats = { + "_type": "{}_iter".format(self.mode), + "cur_epoch": "{}".format(cur_epoch + 1), + "cur_iter": "{}".format(cur_iter + 1), + "eta": eta, + "time_diff": self.iter_timer.seconds(), + "mode": self.mode, + "loss": self.loss.get_win_median(), + "lr": self.lr, + } + elif self.mode == "val": + stats = { + "_type": "{}_iter".format(self.mode), + "cur_epoch": "{}".format(cur_epoch + 1), + "cur_iter": "{}".format(cur_iter + 1), + "eta": eta, + "time_diff": self.iter_timer.seconds(), + "mode": self.mode, + } + elif self.mode == "test": + stats = { + "_type": "{}_iter".format(self.mode), + "cur_iter": "{}".format(cur_iter + 1), + "eta": eta, + "time_diff": self.iter_timer.seconds(), + "mode": self.mode, + } + else: + raise NotImplementedError("Unknown mode: {}".format(self.mode)) + + logging.log_json_stats(stats) + + def iter_tic(self): + """ + Start to record time. 
+ """ + self.iter_timer.reset() + + def iter_toc(self): + """ + Stop to record time. + """ + self.iter_timer.pause() + + def reset(self): + """ + Reset the Meter. + """ + self.loss.reset() + + self.all_preds = [] + self.all_ori_boxes = [] + self.all_metadata = [] + + def update_stats(self, preds, ori_boxes, metadata, loss=None, lr=None): + """ + Update the current stats. + Args: + preds (tensor): prediction embedding. + ori_boxes (tensor): original boxes (x1, y1, x2, y2). + metadata (tensor): metadata of the AVA data. + loss (float): loss value. + lr (float): learning rate. + """ + if self.mode in ["val", "test"]: + self.all_preds.append(preds) + self.all_ori_boxes.append(ori_boxes) + self.all_metadata.append(metadata) + if loss is not None: + self.loss.add_value(loss) + if lr is not None: + self.lr = lr + + def finalize_metrics(self, log=True): + """ + Calculate and log the final AVA metrics. + """ + all_preds = torch.cat(self.all_preds, dim=0) + all_ori_boxes = torch.cat(self.all_ori_boxes, dim=0) + all_metadata = torch.cat(self.all_metadata, dim=0) + + if self.mode == "test" or (self.full_ava_test and self.mode == "val"): + groundtruth = self.full_groundtruth + else: + groundtruth = self.mini_groundtruth + + self.full_map = evaluate_ava( + all_preds, + all_ori_boxes, + all_metadata.tolist(), + self.excluded_keys, + self.class_whitelist, + self.categories, + groundtruth=groundtruth, + video_idx_to_name=self.video_idx_to_name, + ) + if log: + stats = {"mode": self.mode, "map": self.full_map} + logging.log_json_stats(stats) + + def log_epoch_stats(self, cur_epoch): + """ + Log the stats of the current epoch. + Args: + cur_epoch (int): the number of current epoch. + """ + if self.mode in ["val", "test"]: + self.finalize_metrics(log=False) + stats = { + "_type": "{}_epoch".format(self.mode), + "cur_epoch": "{}".format(cur_epoch + 1), + "mode": self.mode, + "map": self.full_map, + "gpu_mem": "{:.2f} GB".format(misc.gpu_mem_usage()), + "RAM": "{:.2f}/{:.2f} GB".format(*misc.cpu_mem_usage()), + } + logging.log_json_stats(stats) + + +class TestMeter(object): + """ + Perform the multi-view ensemble for testing: each video with an unique index + will be sampled with multiple clips, and the predictions of the clips will + be aggregated to produce the final prediction for the video. + The accuracy is calculated with the given ground truth labels. + """ + + def __init__( + self, + num_videos, + num_clips, + num_cls, + overall_iters, + multi_label=False, + ensemble_method="sum", + ): + """ + Construct tensors to store the predictions and labels. Expect to get + num_clips predictions from each video, and calculate the metrics on + num_videos videos. + Args: + num_videos (int): number of videos to test. + num_clips (int): number of clips sampled from each video for + aggregating the final prediction for the video. + num_cls (int): number of classes for each prediction. + overall_iters (int): overall iterations for testing. + multi_label (bool): if True, use map as the metric. + ensemble_method (str): method to perform the ensemble, options + include "sum", and "max". + """ + + self.iter_timer = Timer() + self.num_clips = num_clips + self.overall_iters = overall_iters + self.multi_label = multi_label + self.ensemble_method = ensemble_method + # Initialize tensors. 
+ self.video_preds = torch.zeros((num_videos, num_cls)) + if multi_label: + self.video_preds -= 1e10 + + self.video_labels = ( + torch.zeros((num_videos, num_cls)) + if multi_label + else torch.zeros((num_videos)).long() + ) + self.clip_count = torch.zeros((num_videos)).long() + # Reset metric. + self.reset() + + def reset(self): + """ + Reset the metric. + """ + self.clip_count.zero_() + self.video_preds.zero_() + if self.multi_label: + self.video_preds -= 1e10 + self.video_labels.zero_() + + def update_stats(self, preds, labels, clip_ids): + """ + Collect the predictions from the current batch and perform on-the-flight + summation as ensemble. + Args: + preds (tensor): predictions from the current batch. Dimension is + N x C where N is the batch size and C is the channel size + (num_cls). + labels (tensor): the corresponding labels of the current batch. + Dimension is N. + clip_ids (tensor): clip indexes of the current batch, dimension is + N. + """ + for ind in range(preds.shape[0]): + vid_id = int(clip_ids[ind]) // self.num_clips + if self.video_labels[vid_id].sum() > 0: + assert torch.equal( + self.video_labels[vid_id].type(torch.FloatTensor), + labels[ind].type(torch.FloatTensor), + ) + self.video_labels[vid_id] = labels[ind] + if self.ensemble_method == "sum": + self.video_preds[vid_id] += preds[ind] + elif self.ensemble_method == "max": + self.video_preds[vid_id] = torch.max( + self.video_preds[vid_id], preds[ind] + ) + else: + raise NotImplementedError( + "Ensemble Method {} is not supported".format( + self.ensemble_method + ) + ) + self.clip_count[vid_id] += 1 + + def log_iter_stats(self, cur_iter): + """ + Log the stats. + Args: + cur_iter (int): the current iteration of testing. + """ + eta_sec = self.iter_timer.seconds() * (self.overall_iters - cur_iter) + eta = str(datetime.timedelta(seconds=int(eta_sec))) + stats = { + "split": "test_iter", + "cur_iter": "{}".format(cur_iter + 1), + "eta": eta, + "time_diff": self.iter_timer.seconds(), + } + logging.log_json_stats(stats) + + def iter_tic(self): + self.iter_timer.reset() + + def iter_toc(self): + self.iter_timer.pause() + + def finalize_metrics(self, ks=(1, 5)): + """ + Calculate and log the final ensembled metrics. + ks (tuple): list of top-k values for topk_accuracies. For example, + ks = (1, 5) correspods to top-1 and top-5 accuracy. + """ + if not all(self.clip_count == self.num_clips): + logger.warning( + "clip count {} ~= num clips {}".format( + ", ".join( + [ + "{}: {}".format(i, k) + for i, k in enumerate(self.clip_count.tolist()) + ] + ), + self.num_clips, + ) + ) + + stats = {"split": "test_final"} + if self.multi_label: + map = get_map( + self.video_preds.cpu().numpy(), self.video_labels.cpu().numpy() + ) + stats["map"] = map + else: + num_topks_correct = metrics.topks_correct( + self.video_preds, self.video_labels, ks + ) + topks = [ + (x / self.video_preds.size(0)) * 100.0 + for x in num_topks_correct + ] + assert len({len(ks), len(topks)}) == 1 + for k, topk in zip(ks, topks): + stats["top{}_acc".format(k)] = "{:.{prec}f}".format( + topk, prec=2 + ) + logging.log_json_stats(stats) + + +class ScalarMeter(object): + """ + A scalar meter uses a deque to track a series of scaler values with a given + window size. It supports calculating the median and average values of the + window, and also supports calculating the global average. + """ + + def __init__(self, window_size): + """ + Args: + window_size (int): size of the max length of the deque. 
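The multi-view ensembling in `TestMeter.update_stats` maps each clip back to its video with an integer division and then sums (or takes the max of) the per-clip scores into one per-video prediction. A self-contained sketch with assumed shapes and random scores:

import torch

num_videos, num_clips, num_cls = 2, 3, 4
video_preds = torch.zeros(num_videos, num_cls)

clip_ids = torch.arange(num_videos * num_clips)       # clip indices 0..5
preds = torch.rand(num_videos * num_clips, num_cls)   # one score vector per clip

for ind in range(preds.shape[0]):
    vid_id = int(clip_ids[ind]) // num_clips          # clips 0-2 -> video 0, clips 3-5 -> video 1
    video_preds[vid_id] += preds[ind]                 # the "sum" ensemble method

print(video_preds.argmax(dim=1))                      # ensembled class per video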
+ """ + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + + def reset(self): + """ + Reset the deque. + """ + self.deque.clear() + self.total = 0.0 + self.count = 0 + + def add_value(self, value): + """ + Add a new scalar value to the deque. + """ + self.deque.append(value) + self.count += 1 + self.total += value + + def get_win_median(self): + """ + Calculate the current median value of the deque. + """ + return np.median(self.deque) + + def get_win_avg(self): + """ + Calculate the current average value of the deque. + """ + return np.mean(self.deque) + + def get_global_avg(self): + """ + Calculate the global mean value. + """ + return self.total / self.count + + +class TrainMeter(object): + """ + Measure training stats. + """ + + def __init__(self, epoch_iters, cfg): + """ + Args: + epoch_iters (int): the overall number of iterations of one epoch. + cfg (CfgNode): configs. + """ + self._cfg = cfg + self.epoch_iters = epoch_iters + self.MAX_EPOCH = cfg.SOLVER.MAX_EPOCH * epoch_iters + self.iter_timer = Timer() + self.loss = ScalarMeter(cfg.LOG_PERIOD) + self.loss_total = 0.0 + self.lr = None + # Current minibatch errors (smoothed over a window). + self.mb_top1_err = ScalarMeter(cfg.LOG_PERIOD) + self.mb_top5_err = ScalarMeter(cfg.LOG_PERIOD) + # Number of misclassified examples. + self.num_top1_mis = 0 + self.num_top5_mis = 0 + self.num_samples = 0 + + def reset(self): + """ + Reset the Meter. + """ + self.loss.reset() + self.loss_total = 0.0 + self.lr = None + self.mb_top1_err.reset() + self.mb_top5_err.reset() + self.num_top1_mis = 0 + self.num_top5_mis = 0 + self.num_samples = 0 + + def iter_tic(self): + """ + Start to record time. + """ + self.iter_timer.reset() + + def iter_toc(self): + """ + Stop to record time. + """ + self.iter_timer.pause() + + def update_stats(self, top1_err, top5_err, loss, lr, mb_size): + """ + Update the current stats. + Args: + top1_err (float): top1 error rate. + top5_err (float): top5 error rate. + loss (float): loss value. + lr (float): learning rate. + mb_size (int): mini batch size. + """ + self.loss.add_value(loss) + self.lr = lr + self.loss_total += loss * mb_size + self.num_samples += mb_size + + if not self._cfg.DATA.MULTI_LABEL: + # Current minibatch stats + self.mb_top1_err.add_value(top1_err) + self.mb_top5_err.add_value(top5_err) + # Aggregate stats + self.num_top1_mis += top1_err * mb_size + self.num_top5_mis += top5_err * mb_size + + def log_iter_stats(self, cur_epoch, cur_iter): + """ + log the stats of the current iteration. + Args: + cur_epoch (int): the number of current epoch. + cur_iter (int): the number of current iteration. + """ + if (cur_iter + 1) % self._cfg.LOG_PERIOD != 0: + return + eta_sec = self.iter_timer.seconds() * ( + self.MAX_EPOCH - (cur_epoch * self.epoch_iters + cur_iter + 1) + ) + eta = str(datetime.timedelta(seconds=int(eta_sec))) + stats = { + "_type": "train_iter", + "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH), + "iter": "{}/{}".format(cur_iter + 1, self.epoch_iters), + "time_diff": self.iter_timer.seconds(), + "eta": eta, + "loss": self.loss.get_win_median(), + "lr": self.lr, + "gpu_mem": "{:.2f} GB".format(misc.gpu_mem_usage()), + } + if not self._cfg.DATA.MULTI_LABEL: + stats["top1_err"] = self.mb_top1_err.get_win_median() + stats["top5_err"] = self.mb_top5_err.get_win_median() + logging.log_json_stats(stats) + + def log_epoch_stats(self, cur_epoch): + """ + Log the stats of the current epoch. + Args: + cur_epoch (int): the number of current epoch. 
+ """ + eta_sec = self.iter_timer.seconds() * ( + self.MAX_EPOCH - (cur_epoch + 1) * self.epoch_iters + ) + eta = str(datetime.timedelta(seconds=int(eta_sec))) + stats = { + "_type": "train_epoch", + "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH), + "time_diff": self.iter_timer.seconds(), + "eta": eta, + "lr": self.lr, + "gpu_mem": "{:.2f} GB".format(misc.gpu_mem_usage()), + "RAM": "{:.2f}/{:.2f} GB".format(*misc.cpu_mem_usage()), + } + if not self._cfg.DATA.MULTI_LABEL: + top1_err = self.num_top1_mis / self.num_samples + top5_err = self.num_top5_mis / self.num_samples + avg_loss = self.loss_total / self.num_samples + stats["top1_err"] = top1_err + stats["top5_err"] = top5_err + stats["loss"] = avg_loss + logging.log_json_stats(stats) + + +class TrainIterMeter(object): + """ + Measure training stats. + """ + + def __init__(self, epoch_iters, cfg,extra=[]): + """ + Args: + epoch_iters (int): the overall number of iterations of one epoch. + cfg (CfgNode): configs. + """ + self._cfg = cfg + self.epoch_iters = epoch_iters + self.MAX_EPOCH = cfg.SOLVER.MAX_EPOCH * epoch_iters + self.iter_timer = Timer() + self.loss = ScalarMeter(cfg.LOG_PERIOD) + self.loss_total = 0.0 + self.lr = None + + # Number of misclassified examples. + self.num_samples = 0 + + self.meters={key:ScalarMeter(cfg.LOG_PERIOD) for key in extra} + + def reset(self): + """ + Reset the Meter. + """ + self.loss.reset() + self.loss_total = 0.0 + self.lr = None + + + self.num_samples = 0 + + for meter in self.meters.values(): + meter.reset() + + def iter_tic(self): + """ + Start to record time. + """ + self.iter_timer.reset() + + def iter_toc(self): + """ + Stop to record time. + """ + self.iter_timer.pause() + + def update_stats(self, loss, lr, mb_size,extra={}): + """ + Update the current stats. + Args: + top1_err (float): top1 error rate. + top5_err (float): top5 error rate. + loss (float): loss value. + lr (float): learning rate. + mb_size (int): mini batch size. + """ + self.loss.add_value(loss) + self.lr = lr + self.loss_total += loss * mb_size + self.num_samples += mb_size + + + for key,val in extra.items(): + self.meters[key].add_value(val) + + def log_iter_stats(self, cur_epoch, cur_iter,extra={}): + """ + log the stats of the current iteration. + Args: + cur_epoch (int): the number of current epoch. + cur_iter (int): the number of current iteration. + """ + if (cur_iter + 1) % self._cfg.LOG_PERIOD != 0: + return + eta_sec = self.iter_timer.seconds() * ( + self.MAX_EPOCH - (cur_epoch * self.epoch_iters + cur_iter + 1) + ) + eta = str(datetime.timedelta(seconds=int(eta_sec))) + stats = { + "_type": "train_iter", + "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH), + "iter": "{}/{}".format(cur_iter + 1, self.epoch_iters), + "time_diff": self.iter_timer.seconds(), + "eta": eta, + "loss": self.loss.get_win_median(), + "lr": self.lr, + "gpu_mem": "{:.2f} GB".format(misc.gpu_mem_usage()), + } + + for key,meter in self.meters.items(): + stats[key]=meter.get_win_median() + for key,val in extra.items(): + stats[key]=val + + logging.log_json_stats(stats) + + def log_epoch_stats(self, cur_epoch): + """ + Log the stats of the current epoch. + Args: + cur_epoch (int): the number of current epoch. 
+ """ + eta_sec = self.iter_timer.seconds() * ( + self.MAX_EPOCH - (cur_epoch + 1) * self.epoch_iters + ) + eta = str(datetime.timedelta(seconds=int(eta_sec))) + stats = { + "_type": "train_epoch", + "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH), + "time_diff": self.iter_timer.seconds(), + "eta": eta, + "lr": self.lr, + "gpu_mem": "{:.2f} GB".format(misc.gpu_mem_usage()), + "RAM": "{:.2f}/{:.2f} GB".format(*misc.cpu_mem_usage()), + } + if not self._cfg.DATA.MULTI_LABEL: + avg_loss = self.loss_total / self.num_samples + stats["loss"] = avg_loss + logging.log_json_stats(stats) + + + + +class ValMeter(object): + """ + Measures validation stats. + """ + + def __init__(self, max_iter, cfg): + """ + Args: + max_iter (int): the max number of iteration of the current epoch. + cfg (CfgNode): configs. + """ + self._cfg = cfg + self.max_iter = max_iter + self.iter_timer = Timer() + # Current minibatch errors (smoothed over a window). + self.mb_top1_err = ScalarMeter(cfg.LOG_PERIOD) + self.mb_top5_err = ScalarMeter(cfg.LOG_PERIOD) + # Min errors (over the full val set). + self.min_top1_err = 100.0 + self.min_top5_err = 100.0 + # Number of misclassified examples. + self.num_top1_mis = 0 + self.num_top5_mis = 0 + self.num_samples = 0 + self.all_preds = [] + self.all_labels = [] + + def reset(self): + """ + Reset the Meter. + """ + self.iter_timer.reset() + self.mb_top1_err.reset() + self.mb_top5_err.reset() + self.num_top1_mis = 0 + self.num_top5_mis = 0 + self.num_samples = 0 + self.all_preds = [] + self.all_labels = [] + + def iter_tic(self): + """ + Start to record time. + """ + self.iter_timer.reset() + + def iter_toc(self): + """ + Stop to record time. + """ + self.iter_timer.pause() + + def update_stats(self, top1_err, top5_err, mb_size): + """ + Update the current stats. + Args: + top1_err (float): top1 error rate. + top5_err (float): top5 error rate. + mb_size (int): mini batch size. + """ + self.mb_top1_err.add_value(top1_err) + self.mb_top5_err.add_value(top5_err) + self.num_top1_mis += top1_err * mb_size + self.num_top5_mis += top5_err * mb_size + self.num_samples += mb_size + + def update_predictions(self, preds, labels): + """ + Update predictions and labels. + Args: + preds (tensor): model output predictions. + labels (tensor): labels. + """ + # TODO: merge update_prediction with update_stats. + self.all_preds.append(preds) + self.all_labels.append(labels) + + def log_iter_stats(self, cur_epoch, cur_iter): + """ + log the stats of the current iteration. + Args: + cur_epoch (int): the number of current epoch. + cur_iter (int): the number of current iteration. + """ + if (cur_iter + 1) % self._cfg.LOG_PERIOD != 0: + return + eta_sec = self.iter_timer.seconds() * (self.max_iter - cur_iter - 1) + eta = str(datetime.timedelta(seconds=int(eta_sec))) + stats = { + "_type": "val_iter", + "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH), + "iter": "{}/{}".format(cur_iter + 1, self.max_iter), + "time_diff": self.iter_timer.seconds(), + "eta": eta, + "gpu_mem": "{:.2f} GB".format(misc.gpu_mem_usage()), + } + if not self._cfg.DATA.MULTI_LABEL: + stats["top1_err"] = self.mb_top1_err.get_win_median() + stats["top5_err"] = self.mb_top5_err.get_win_median() + logging.log_json_stats(stats) + + def log_epoch_stats(self, cur_epoch): + """ + Log the stats of the current epoch. + Args: + cur_epoch (int): the number of current epoch. 
+ """ + stats = { + "_type": "val_epoch", + "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH), + "time_diff": self.iter_timer.seconds(), + "gpu_mem": "{:.2f} GB".format(misc.gpu_mem_usage()), + "RAM": "{:.2f}/{:.2f} GB".format(*misc.cpu_mem_usage()), + } + if self._cfg.DATA.MULTI_LABEL: + stats["map"] = get_map( + torch.cat(self.all_preds).cpu().numpy(), + torch.cat(self.all_labels).cpu().numpy(), + ) + else: + top1_err = self.num_top1_mis / self.num_samples + top5_err = self.num_top5_mis / self.num_samples + self.min_top1_err = min(self.min_top1_err, top1_err) + self.min_top5_err = min(self.min_top5_err, top5_err) + + stats["top1_err"] = top1_err + stats["top5_err"] = top5_err + stats["min_top1_err"] = self.min_top1_err + stats["min_top5_err"] = self.min_top5_err + + logging.log_json_stats(stats) + + +def get_map(preds, labels): + """ + Compute mAP for multi-label case. + Args: + preds (numpy tensor): num_examples x num_classes. + labels (numpy tensor): num_examples x num_classes. + Returns: + mean_ap (int): final mAP score. + """ + + logger.info("Getting mAP for {} examples".format(preds.shape[0])) + + preds = preds[:, ~(np.all(labels == 0, axis=0))] + labels = labels[:, ~(np.all(labels == 0, axis=0))] + aps = [0] + try: + aps = average_precision_score(labels, preds, average=None) + except ValueError: + print( + "Average precision requires a sufficient number of samples \ + in a batch which are missing in this sample." + ) + + mean_ap = np.mean(aps) + return mean_ap diff --git a/training/detectors/utils/slowfast/utils/metrics.py b/training/detectors/utils/slowfast/utils/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..0ef01b174aa5c3d54da77923f515f244327c4e80 --- /dev/null +++ b/training/detectors/utils/slowfast/utils/metrics.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Functions for computing metrics.""" + +import torch + + +def topks_correct(preds, labels, ks): + """ + Given the predictions, labels, and a list of top-k values, compute the + number of correct predictions for each top-k value. + + Args: + preds (array): array of predictions. Dimension is batchsize + N x ClassNum. + labels (array): array of labels. Dimension is batchsize N. + ks (list): list of top-k values. For example, ks = [1, 5] correspods + to top-1 and top-5. + + Returns: + topks_correct (list): list of numbers, where the `i`-th entry + corresponds to the number of top-`ks[i]` correct predictions. + """ + assert preds.size(0) == labels.size( + 0 + ), "Batch dim of predictions and labels must match" + # Find the top max_k predictions for each sample + _top_max_k_vals, top_max_k_inds = torch.topk( + preds, max(ks), dim=1, largest=True, sorted=True + ) + # (batch_size, max_k) -> (max_k, batch_size). + top_max_k_inds = top_max_k_inds.t() + # (batch_size, ) -> (max_k, batch_size). + rep_max_k_labels = labels.view(1, -1).expand_as(top_max_k_inds) + # (i, j) = 1 if top i-th prediction for the j-th sample is correct. + top_max_k_correct = top_max_k_inds.eq(rep_max_k_labels) + # Compute the number of topk correct predictions for each k. + topks_correct = [ + top_max_k_correct[:k, :].view(-1).float().sum() for k in ks + ] + return topks_correct + + +def topk_errors(preds, labels, ks): + """ + Computes the top-k error for each k. + Args: + preds (array): array of predictions. Dimension is N. + labels (array): array of labels. Dimension is N. + ks (list): list of ks to calculate the top accuracies. 
+ """ + num_topks_correct = topks_correct(preds, labels, ks) + return [(1.0 - x / preds.size(0)) * 100.0 for x in num_topks_correct] + + +def topk_accuracies(preds, labels, ks): + """ + Computes the top-k accuracy for each k. + Args: + preds (array): array of predictions. Dimension is N. + labels (array): array of labels. Dimension is N. + ks (list): list of ks to calculate the top accuracies. + """ + num_topks_correct = topks_correct(preds, labels, ks) + return [(x / preds.size(0)) * 100.0 for x in num_topks_correct] diff --git a/training/detectors/utils/slowfast/utils/misc.py b/training/detectors/utils/slowfast/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..13fea609bca56845d61819707a66cbd807b956d8 --- /dev/null +++ b/training/detectors/utils/slowfast/utils/misc.py @@ -0,0 +1,359 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +import json +import logging +import math +import numpy as np +import os +from datetime import datetime +import psutil +import torch +from fvcore.common.file_io import PathManager +from fvcore.nn.activation_count import activation_count +from fvcore.nn.flop_count import flop_count +from matplotlib import pyplot as plt +from torch import nn + +import slowfast.utils.logging as logging +import slowfast.utils.multiprocessing as mpu +from slowfast.datasets.utils import pack_pathway_output +from slowfast.models.batchnorm_helper import SubBatchNorm3d + +logger = logging.get_logger(__name__) + + +def check_nan_losses(loss): + """ + Determine whether the loss is NaN (not a number). + Args: + loss (loss): loss to check whether is NaN. + """ + if math.isnan(loss): + raise RuntimeError("ERROR: Got NaN losses {}".format(datetime.now())) + + +def params_count(model): + """ + Compute the number of parameters. + Args: + model (model): model to count the number of parameters. + """ + return np.sum([p.numel() for p in model.parameters()]).item() + + +def gpu_mem_usage(): + """ + Compute the GPU memory usage for the current device (GB). + """ + if torch.cuda.is_available(): + mem_usage_bytes = torch.cuda.max_memory_allocated() + else: + mem_usage_bytes = 0 + return mem_usage_bytes / 1024 ** 3 + + +def cpu_mem_usage(): + """ + Compute the system memory (RAM) usage for the current device (GB). + Returns: + usage (float): used memory (GB). + total (float): total memory (GB). + """ + vram = psutil.virtual_memory() + usage = (vram.total - vram.available) / 1024 ** 3 + total = vram.total / 1024 ** 3 + + return usage, total + + +def _get_model_analysis_input(cfg, use_train_input): + """ + Return a dummy input for model analysis with batch size 1. The input is + used for analyzing the model (counting flops and activations etc.). + Args: + cfg (CfgNode): configs. Details can be found in + slowfast/config/defaults.py + use_train_input (bool): if True, return the input for training. Otherwise, + return the input for testing. + + Returns: + inputs: the input for model analysis. 
+ """ + rgb_dimension = 3 + if use_train_input: + input_tensors = torch.rand( + rgb_dimension, + cfg.DATA.NUM_FRAMES, + cfg.DATA.TRAIN_CROP_SIZE, + cfg.DATA.TRAIN_CROP_SIZE, + ) + else: + input_tensors = torch.rand( + rgb_dimension, + cfg.DATA.NUM_FRAMES, + cfg.DATA.TEST_CROP_SIZE, + cfg.DATA.TEST_CROP_SIZE, + ) + model_inputs = pack_pathway_output(cfg, input_tensors) + for i in range(len(model_inputs)): + model_inputs[i] = model_inputs[i].unsqueeze(0) + if cfg.NUM_GPUS: + model_inputs[i] = model_inputs[i].cuda(non_blocking=True) + + # If detection is enabled, count flops for one proposal. + if cfg.DETECTION.ENABLE: + bbox = torch.tensor([[0, 0, 1.0, 0, 1.0]]) + if cfg.NUM_GPUS: + bbox = bbox.cuda() + inputs = (model_inputs, bbox) + else: + inputs = (model_inputs,) + return inputs + + +def get_model_stats(model, cfg, mode, use_train_input): + """ + Compute statistics for the current model given the config. + Args: + model (model): model to perform analysis. + cfg (CfgNode): configs. Details can be found in + slowfast/config/defaults.py + mode (str): Options include `flop` or `activation`. Compute either flop + (gflops) or activation count (mega). + use_train_input (bool): if True, compute statistics for training. Otherwise, + compute statistics for testing. + + Returns: + float: the total number of count of the given model. + """ + assert mode in [ + "flop", + "activation", + ], "'{}' not supported for model analysis".format(mode) + if mode == "flop": + model_stats_fun = flop_count + elif mode == "activation": + model_stats_fun = activation_count + + # Set model to evaluation mode for analysis. + # Evaluation mode can avoid getting stuck with sync batchnorm. + model_mode = model.training + model.eval() + inputs = _get_model_analysis_input(cfg, use_train_input) + count_dict, _ = model_stats_fun(model, inputs) + count = sum(count_dict.values()) + model.train(model_mode) + return count + + +def log_model_info(model, cfg, use_train_input=True): + """ + Log info, includes number of parameters, gpu usage, gflops and activation count. + The model info is computed when the model is in validation mode. + Args: + model (model): model to log the info. + cfg (CfgNode): configs. Details can be found in + slowfast/config/defaults.py + use_train_input (bool): if True, log info for training. Otherwise, + log info for testing. + """ + print("Model:\n{}".format(model)) + print("Params: {:,}".format(params_count(model))) + print("Mem: {:,} MB".format(gpu_mem_usage())) + print( + "Flops: {:,} G".format( + get_model_stats(model, cfg, "flop", use_train_input) + ) + ) + print( + "Activations: {:,} M".format( + get_model_stats(model, cfg, "activation", use_train_input) + ) + ) + logger.info("nvidia-smi") + os.system("nvidia-smi") + +def is_eval_epoch(cfg, cur_epoch, multigrid_schedule): + """ + Determine if the model should be evaluated at the current epoch. + Args: + cfg (CfgNode): configs. Details can be found in + slowfast/config/defaults.py + cur_epoch (int): current epoch. + multigrid_schedule (List): schedule for multigrid training. 
+ """ + if cur_epoch + 1 == cfg.SOLVER.MAX_EPOCH: + return True + if multigrid_schedule is not None: + prev_epoch = 0 + for s in multigrid_schedule: + if cur_epoch < s[-1]: + period = max( + (s[-1] - prev_epoch) // cfg.MULTIGRID.EVAL_FREQ + 1, 1 + ) + return (s[-1] - 1 - cur_epoch) % period == 0 + prev_epoch = s[-1] + + return (cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 + + +def plot_input(tensor, bboxes=(), texts=(), path="./tmp_vis.png"): + """ + Plot the input tensor with the optional bounding box and save it to disk. + Args: + tensor (tensor): a tensor with shape of `NxCxHxW`. + bboxes (tuple): bounding boxes with format of [[x, y, h, w]]. + texts (tuple): a tuple of string to plot. + path (str): path to the image to save to. + """ + tensor = tensor - tensor.min() + tensor = tensor / tensor.max() + f, ax = plt.subplots(nrows=1, ncols=tensor.shape[0], figsize=(50, 20)) + for i in range(tensor.shape[0]): + ax[i].axis("off") + ax[i].imshow(tensor[i].permute(1, 2, 0)) + # ax[1][0].axis('off') + if bboxes is not None and len(bboxes) > i: + for box in bboxes[i]: + x1, y1, x2, y2 = box + ax[i].vlines(x1, y1, y2, colors="g", linestyles="solid") + ax[i].vlines(x2, y1, y2, colors="g", linestyles="solid") + ax[i].hlines(y1, x1, x2, colors="g", linestyles="solid") + ax[i].hlines(y2, x1, x2, colors="g", linestyles="solid") + + if texts is not None and len(texts) > i: + ax[i].text(0, 0, texts[i]) + f.savefig(path) + + +def frozen_bn_stats(model): + """ + Set all the bn layers to eval mode. + Args: + model (model): model to set bn layers to eval mode. + """ + for m in model.modules(): + if isinstance(m, nn.BatchNorm3d): + m.eval() + + +def aggregate_sub_bn_stats(module): + """ + Recursively find all SubBN modules and aggregate sub-BN stats. + Args: + module (nn.Module) + Returns: + count (int): number of SubBN module found. + """ + count = 0 + for child in module.children(): + if isinstance(child, SubBatchNorm3d): + child.aggregate_stats() + count += 1 + else: + count += aggregate_sub_bn_stats(child) + return count + + +def launch_job(cfg, init_method, func, daemon=False): + """ + Run 'func' on one or more GPUs, specified in cfg + Args: + cfg (CfgNode): configs. Details can be found in + slowfast/config/defaults.py + init_method (str): initialization method to launch the job with multiple + devices. + func (function): job to run on GPU(s) + daemon (bool): The spawned processes’ daemon flag. If set to True, + daemonic processes will be created + """ + if cfg.NUM_GPUS > 1: + torch.multiprocessing.spawn( + mpu.run, + nprocs=cfg.NUM_GPUS, + args=( + cfg.NUM_GPUS, + func, + init_method, + cfg.SHARD_ID, + cfg.NUM_SHARDS, + cfg.DIST_BACKEND, + cfg, + ), + daemon=daemon, + ) + else: + func(cfg=cfg) + + +def get_class_names(path, parent_path=None, subset_path=None): + """ + Read json file with entries {classname: index} and return + an array of class names in order. + If parent_path is provided, load and map all children to their ids. + Args: + path (str): path to class ids json file. + File must be in the format {"class1": id1, "class2": id2, ...} + parent_path (Optional[str]): path to parent-child json file. + File must be in the format {"parent1": ["child1", "child2", ...], ...} + subset_path (Optional[str]): path to text file containing a subset + of class names, separated by newline characters. + Returns: + class_names (list of strs): list of class names. + class_parents (dict): a dictionary where key is the name of the parent class + and value is a list of ids of the children classes. 
+ subset_ids (list of ints): list of ids of the classes provided in the + subset file. + """ + try: + with PathManager.open(path, "r") as f: + class2idx = json.load(f) + except Exception as err: + print("Fail to load file from {} with error {}".format(path, err)) + return + + max_key = max(class2idx.values()) + class_names = [None] * (max_key + 1) + + for k, i in class2idx.items(): + class_names[i] = k + + class_parent = None + if parent_path is not None and parent_path != "": + try: + with PathManager.open(parent_path, "r") as f: + d_parent = json.load(f) + except EnvironmentError as err: + print( + "Fail to load file from {} with error {}".format( + parent_path, err + ) + ) + return + class_parent = {} + for parent, children in d_parent.items(): + indices = [ + class2idx[c] for c in children if class2idx.get(c) is not None + ] + class_parent[parent] = indices + + subset_ids = None + if subset_path is not None and subset_path != "": + try: + with PathManager.open(subset_path, "r") as f: + subset = f.read().split("\n") + subset_ids = [ + class2idx[name] + for name in subset + if class2idx.get(name) is not None + ] + except EnvironmentError as err: + print( + "Fail to load file from {} with error {}".format( + subset_path, err + ) + ) + return + + return class_names, class_parent, subset_ids diff --git a/training/detectors/utils/slowfast/utils/multigrid.py b/training/detectors/utils/slowfast/utils/multigrid.py new file mode 100644 index 0000000000000000000000000000000000000000..4aed24bb4889d30960cec5ca94ab20a73b40b9e1 --- /dev/null +++ b/training/detectors/utils/slowfast/utils/multigrid.py @@ -0,0 +1,240 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Helper functions for multigrid training.""" + +import numpy as np + +import slowfast.utils.logging as logging + +logger = logging.get_logger(__name__) + + +class MultigridSchedule(object): + """ + This class defines multigrid training schedule and update cfg accordingly. + """ + + def init_multigrid(self, cfg): + """ + Update cfg based on multigrid settings. + Args: + cfg (configs): configs that contains training and multigrid specific + hyperparameters. Details can be seen in + slowfast/config/defaults.py. + Returns: + cfg (configs): the updated cfg. + """ + self.schedule = None + # We may modify cfg.TRAIN.BATCH_SIZE, cfg.DATA.NUM_FRAMES, and + # cfg.DATA.TRAIN_CROP_SIZE during training, so we store their original + # value in cfg and use them as global variables. + cfg.MULTIGRID.DEFAULT_B = cfg.TRAIN.BATCH_SIZE + cfg.MULTIGRID.DEFAULT_T = cfg.DATA.NUM_FRAMES + cfg.MULTIGRID.DEFAULT_S = cfg.DATA.TRAIN_CROP_SIZE + + if cfg.MULTIGRID.LONG_CYCLE: + self.schedule = self.get_long_cycle_schedule(cfg) + cfg.SOLVER.STEPS = [0] + [s[-1] for s in self.schedule] + # Fine-tuning phase. + cfg.SOLVER.STEPS[-1] = ( + cfg.SOLVER.STEPS[-2] + cfg.SOLVER.STEPS[-1] + ) // 2 + cfg.SOLVER.LRS = [ + cfg.SOLVER.GAMMA ** s[0] * s[1][0] for s in self.schedule + ] + # Fine-tuning phase. + cfg.SOLVER.LRS = cfg.SOLVER.LRS[:-1] + [ + cfg.SOLVER.LRS[-2], + cfg.SOLVER.LRS[-1], + ] + + cfg.SOLVER.MAX_EPOCH = self.schedule[-1][-1] + + elif cfg.MULTIGRID.SHORT_CYCLE: + cfg.SOLVER.STEPS = [ + int(s * cfg.MULTIGRID.EPOCH_FACTOR) for s in cfg.SOLVER.STEPS + ] + cfg.SOLVER.MAX_EPOCH = int( + cfg.SOLVER.MAX_EPOCH * cfg.MULTIGRID.EPOCH_FACTOR + ) + return cfg + + def update_long_cycle(self, cfg, cur_epoch): + """ + Before every epoch, check if long cycle shape should change. If it + should, update cfg accordingly. 
+ Args: + cfg (configs): configs that contains training and multigrid specific + hyperparameters. Details can be seen in + slowfast/config/defaults.py. + cur_epoch (int): current epoch index. + Returns: + cfg (configs): the updated cfg. + changed (bool): do we change long cycle shape at this epoch? + """ + base_b, base_t, base_s = get_current_long_cycle_shape( + self.schedule, cur_epoch + ) + if base_s != cfg.DATA.TRAIN_CROP_SIZE or base_t != cfg.DATA.NUM_FRAMES: + + cfg.DATA.NUM_FRAMES = base_t + cfg.DATA.TRAIN_CROP_SIZE = base_s + cfg.TRAIN.BATCH_SIZE = base_b * cfg.MULTIGRID.DEFAULT_B + + bs_factor = ( + float(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS) + / cfg.MULTIGRID.BN_BASE_SIZE + ) + + if bs_factor < 1: + cfg.BN.NORM_TYPE = "sync_batchnorm" + cfg.BN.NUM_SYNC_DEVICES = int(1.0 / bs_factor) + elif bs_factor > 1: + cfg.BN.NORM_TYPE = "sub_batchnorm" + cfg.BN.NUM_SPLITS = int(bs_factor) + else: + cfg.BN.NORM_TYPE = "batchnorm" + + cfg.MULTIGRID.LONG_CYCLE_SAMPLING_RATE = cfg.DATA.SAMPLING_RATE * ( + cfg.MULTIGRID.DEFAULT_T // cfg.DATA.NUM_FRAMES + ) + logger.info("Long cycle updates:") + logger.info("\tBN.NORM_TYPE: {}".format(cfg.BN.NORM_TYPE)) + if cfg.BN.NORM_TYPE == "sync_batchnorm": + logger.info( + "\tBN.NUM_SYNC_DEVICES: {}".format(cfg.BN.NUM_SYNC_DEVICES) + ) + elif cfg.BN.NORM_TYPE == "sub_batchnorm": + logger.info("\tBN.NUM_SPLITS: {}".format(cfg.BN.NUM_SPLITS)) + logger.info("\tTRAIN.BATCH_SIZE: {}".format(cfg.TRAIN.BATCH_SIZE)) + logger.info( + "\tDATA.NUM_FRAMES x LONG_CYCLE_SAMPLING_RATE: {}x{}".format( + cfg.DATA.NUM_FRAMES, cfg.MULTIGRID.LONG_CYCLE_SAMPLING_RATE + ) + ) + logger.info( + "\tDATA.TRAIN_CROP_SIZE: {}".format(cfg.DATA.TRAIN_CROP_SIZE) + ) + return cfg, True + else: + return cfg, False + + def get_long_cycle_schedule(self, cfg): + """ + Based on multigrid hyperparameters, define the schedule of a long cycle. + Args: + cfg (configs): configs that contains training and multigrid specific + hyperparameters. Details can be seen in + slowfast/config/defaults.py. + Returns: + schedule (list): Specifies a list long cycle base shapes and their + corresponding training epochs. + """ + + steps = cfg.SOLVER.STEPS + + default_size = float( + cfg.DATA.NUM_FRAMES * cfg.DATA.TRAIN_CROP_SIZE ** 2 + ) + default_iters = steps[-1] + + # Get shapes and average batch size for each long cycle shape. + avg_bs = [] + all_shapes = [] + for t_factor, s_factor in cfg.MULTIGRID.LONG_CYCLE_FACTORS: + base_t = int(round(cfg.DATA.NUM_FRAMES * t_factor)) + base_s = int(round(cfg.DATA.TRAIN_CROP_SIZE * s_factor)) + if cfg.MULTIGRID.SHORT_CYCLE: + shapes = [ + [ + base_t, + cfg.MULTIGRID.DEFAULT_S + * cfg.MULTIGRID.SHORT_CYCLE_FACTORS[0], + ], + [ + base_t, + cfg.MULTIGRID.DEFAULT_S + * cfg.MULTIGRID.SHORT_CYCLE_FACTORS[1], + ], + [base_t, base_s], + ] + else: + shapes = [[base_t, base_s]] + + # (T, S) -> (B, T, S) + shapes = [ + [int(round(default_size / (s[0] * s[1] * s[1]))), s[0], s[1]] + for s in shapes + ] + avg_bs.append(np.mean([s[0] for s in shapes])) + all_shapes.append(shapes) + + # Get schedule regardless of cfg.MULTIGRID.EPOCH_FACTOR. 
+ total_iters = 0 + schedule = [] + for step_index in range(len(steps) - 1): + step_epochs = steps[step_index + 1] - steps[step_index] + + for long_cycle_index, shapes in enumerate(all_shapes): + cur_epochs = ( + step_epochs * avg_bs[long_cycle_index] / sum(avg_bs) + ) + + cur_iters = cur_epochs / avg_bs[long_cycle_index] + total_iters += cur_iters + schedule.append((step_index, shapes[-1], cur_epochs)) + + iter_saving = default_iters / total_iters + + final_step_epochs = cfg.SOLVER.MAX_EPOCH - steps[-1] + + # We define the fine-tuning phase to have the same amount of iteration + # saving as the rest of the training. + ft_epochs = final_step_epochs / iter_saving * avg_bs[-1] + + schedule.append((step_index + 1, all_shapes[-1][2], ft_epochs)) + + # Obtrain final schedule given desired cfg.MULTIGRID.EPOCH_FACTOR. + x = ( + cfg.SOLVER.MAX_EPOCH + * cfg.MULTIGRID.EPOCH_FACTOR + / sum(s[-1] for s in schedule) + ) + + final_schedule = [] + total_epochs = 0 + for s in schedule: + epochs = s[2] * x + total_epochs += epochs + final_schedule.append((s[0], s[1], int(round(total_epochs)))) + print_schedule(final_schedule) + return final_schedule + + +def print_schedule(schedule): + """ + Log schedule. + """ + logger.info("Long cycle index\tBase shape\tEpochs") + for s in schedule: + logger.info("{}\t{}\t{}".format(s[0], s[1], s[2])) + + +def get_current_long_cycle_shape(schedule, epoch): + """ + Given a schedule and epoch index, return the long cycle base shape. + Args: + schedule (configs): configs that contains training and multigrid specific + hyperparameters. Details can be seen in + slowfast/config/defaults.py. + cur_epoch (int): current epoch index. + Returns: + shapes (list): A list describing the base shape in a long cycle: + [batch size relative to default, + number of frames, spatial dimension]. + """ + for s in schedule: + if epoch < s[-1]: + return s[1] + return schedule[-1][1] diff --git a/training/detectors/utils/slowfast/utils/multiprocessing.py b/training/detectors/utils/slowfast/utils/multiprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..a56aa603697dd3a6a37871ddd4407523aeebbb8c --- /dev/null +++ b/training/detectors/utils/slowfast/utils/multiprocessing.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Multiprocessing helpers.""" + +import torch + + +def run( + local_rank, num_proc, func, init_method, shard_id, num_shards, backend, cfg +): + """ + Runs a function from a child process. + Args: + local_rank (int): rank of the current process on the current machine. + num_proc (int): number of processes per machine. + func (function): function to execute on each of the process. + init_method (string): method to initialize the distributed training. + TCP initialization: equiring a network address reachable from all + processes followed by the port. + Shared file-system initialization: makes use of a file system that + is shared and visible from all machines. The URL should start with + file:// and contain a path to a non-existent file on a shared file + system. + shard_id (int): the rank of the current machine. + num_shards (int): number of overall machines for the distributed + training job. + backend (string): three distributed backends ('nccl', 'gloo', 'mpi') are + supports, each with different capabilities. Details can be found + here: + https://pytorch.org/docs/stable/distributed.html + cfg (CfgNode): configs. 
Details can be found in + slowfast/config/defaults.py + """ + # Initialize the process group. + world_size = num_proc * num_shards + rank = shard_id * num_proc + local_rank + + try: + torch.distributed.init_process_group( + backend=backend, + init_method=init_method, + world_size=world_size, + rank=rank, + ) + except Exception as e: + raise e + + torch.cuda.set_device(local_rank) + func(cfg) diff --git a/training/detectors/utils/slowfast/utils/parser.py b/training/detectors/utils/slowfast/utils/parser.py new file mode 100644 index 0000000000000000000000000000000000000000..06b4373e3b3736ceb310eed465fbc75e3bae1eb5 --- /dev/null +++ b/training/detectors/utils/slowfast/utils/parser.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Argument parser functions.""" + +import argparse +import sys + +import slowfast.utils.checkpoint as cu +from slowfast.config.defaults import get_cfg + + +def parse_args(): + """ + Parse the following arguments for a default parser for PySlowFast users. + Args: + shard_id (int): shard id for the current machine. Starts from 0 to + num_shards - 1. If single machine is used, then set shard id to 0. + num_shards (int): number of shards using by the job. + init_method (str): initialization method to launch the job with multiple + devices. Options includes TCP or shared file-system for + initialization. details can be find in + https://pytorch.org/docs/stable/distributed.html#tcp-initialization + cfg (str): path to the config file. + opts (argument): provide addtional options from the command line, it + overwrites the config loaded from file. + """ + parser = argparse.ArgumentParser( + description="Provide SlowFast video training and testing pipeline." + ) + parser.add_argument( + "--shard_id", + help="The shard id of current node, Starts from 0 to num_shards - 1", + default=0, + type=int, + ) + parser.add_argument( + "--num_shards", + help="Number of shards using by the job", + default=1, + type=int, + ) + parser.add_argument( + "--init_method", + help="Initialization method, includes TCP or shared file-system", + default="tcp://localhost:9999", + type=str, + ) + parser.add_argument( + "--cfg", + dest="cfg_file", + help="Path to the config file", + default="configs/Kinetics/SLOWFAST_4x16_R50.yaml", + type=str, + ) + parser.add_argument( + "opts", + help="See slowfast/config/defaults.py for all options", + default=None, + nargs=argparse.REMAINDER, + ) + if len(sys.argv) == 1: + parser.print_help() + return parser.parse_args() + + +def load_config(args): + """ + Given the arguemnts, load and initialize the configs. + Args: + args (argument): arguments includes `shard_id`, `num_shards`, + `init_method`, `cfg_file`, and `opts`. + """ + # Setup cfg. + cfg = get_cfg() + # Load config from cfg. + if args.cfg_file is not None: + cfg.merge_from_file(args.cfg_file) + # Load config from command line, overwrite config from opts. + if args.opts is not None: + cfg.merge_from_list(args.opts) + + # Inherit parameters from args. + if hasattr(args, "num_shards") and hasattr(args, "shard_id"): + cfg.NUM_SHARDS = args.num_shards + cfg.SHARD_ID = args.shard_id + if hasattr(args, "rng_seed"): + cfg.RNG_SEED = args.rng_seed + if hasattr(args, "output_dir"): + cfg.OUTPUT_DIR = args.output_dir + + # Create the checkpoint dir. 
+ cu.make_checkpoint_dir(cfg.OUTPUT_DIR) + return cfg diff --git a/training/detectors/utils/slowfast/utils/weight_init_helper.py b/training/detectors/utils/slowfast/utils/weight_init_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..0b5544a70529f5dd1b06ba05a6aca4c7f508bdf3 --- /dev/null +++ b/training/detectors/utils/slowfast/utils/weight_init_helper.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Utility function for weight initialization""" + +import torch.nn as nn +from fvcore.nn.weight_init import c2_msra_fill + + +def init_weights(model, fc_init_std=0.01, zero_init_final_bn=True): + """ + Performs ResNet style weight initialization. + Args: + fc_init_std (float): the expected standard deviation for fc layer. + zero_init_final_bn (bool): if True, zero initialize the final bn for + every bottleneck. + """ + for m in model.modules(): + if isinstance(m, nn.Conv3d): + """ + Follow the initialization method proposed in: + {He, Kaiming, et al. + "Delving deep into rectifiers: Surpassing human-level + performance on imagenet classification." + arXiv preprint arXiv:1502.01852 (2015)} + """ + c2_msra_fill(m) + elif isinstance(m, nn.BatchNorm3d): + if ( + hasattr(m, "transform_final_bn") + and m.transform_final_bn + and zero_init_final_bn + ): + batchnorm_weight = 0.0 + else: + batchnorm_weight = 1.0 + if m.weight is not None: + m.weight.data.fill_(batchnorm_weight) + if m.bias is not None: + m.bias.data.zero_() + if isinstance(m, nn.Linear): + m.weight.data.normal_(mean=0.0, std=fc_init_std) + m.bias.data.zero_() diff --git a/training/detectors/xception_detector.py b/training/detectors/xception_detector.py new file mode 100644 index 0000000000000000000000000000000000000000..3e6b7d6bee4abb38c9e85c7fc94a3bc5b8f57acb --- /dev/null +++ b/training/detectors/xception_detector.py @@ -0,0 +1,117 @@ +''' +# author: Zhiyuan Yan +# email: zhiyuanyan@link.cuhk.edu.cn +# date: 2023-0706 +# description: Class for the XceptionDetector + +Functions in the Class are summarized as: +1. __init__: Initialization +2. build_backbone: Backbone-building +3. build_loss: Loss-function-building +4. features: Feature-extraction +5. classifier: Classification +6. get_losses: Loss-computation +7. get_train_metrics: Training-metrics-computation +8. get_test_metrics: Testing-metrics-computation +9. 
forward: Forward-propagation + +Reference: +@inproceedings{rossler2019faceforensics++, + title={Faceforensics++: Learning to detect manipulated facial images}, + author={Rossler, Andreas and Cozzolino, Davide and Verdoliva, Luisa and Riess, Christian and Thies, Justus and Nie{\ss}ner, Matthias}, + booktitle={Proceedings of the IEEE/CVF international conference on computer vision}, + pages={1--11}, + year={2019} +} +''' + +import os +import datetime +import logging +import numpy as np +from sklearn import metrics +from typing import Union +from collections import defaultdict + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from torch.nn import DataParallel +from torch.utils.tensorboard import SummaryWriter + +from metrics.base_metrics_class import calculate_metrics_for_train + +from .base_detector import AbstractDetector +from detectors import DETECTOR +from networks import BACKBONE +from loss import LOSSFUNC + +logger = logging.getLogger(__name__) + +@DETECTOR.register_module(module_name='xception') +class XceptionDetector(AbstractDetector): + def __init__(self, config): + super().__init__() + self.config = config + self.backbone = self.build_backbone(config) + self.loss_func = self.build_loss(config) + self.prob, self.label = [], [] + self.video_names = [] + self.correct, self.total = 0, 0 + + def build_backbone(self, config): + # prepare the backbone + backbone_class = BACKBONE[config['backbone_name']] + model_config = config['backbone_config'] + backbone = backbone_class(model_config) + # if donot load the pretrained weights, fail to get good results + state_dict = torch.load(config['pretrained']) + for name, weights in state_dict.items(): + if 'pointwise' in name: + state_dict[name] = weights.unsqueeze(-1).unsqueeze(-1) + state_dict = {k:v for k, v in state_dict.items() if 'fc' not in k} + backbone.load_state_dict(state_dict, False) + logger.info('Load pretrained model successfully!') + return backbone + + def build_loss(self, config): + # prepare the loss function + loss_class = LOSSFUNC[config['loss_func']] + loss_func = loss_class() + return loss_func + + def features(self, data_dict: dict) -> torch.tensor: + return self.backbone.features(data_dict['image']) #32,3,256,256 + + def classifier(self, features: torch.tensor) -> torch.tensor: + return self.backbone.classifier(features) + + def get_losses(self, data_dict: dict, pred_dict: dict) -> dict: + label = data_dict['label'] + pred = pred_dict['cls'] + loss = self.loss_func(pred, label) + overall_loss = loss + loss_dict = {'overall': overall_loss, 'cls': loss,} + return loss_dict + + def get_train_metrics(self, data_dict: dict, pred_dict: dict) -> dict: + label = data_dict['label'] + pred = pred_dict['cls'] + # compute metrics for batch data + auc, eer, acc, ap = calculate_metrics_for_train(label.detach(), pred.detach()) + metric_batch_dict = {'acc': acc, 'auc': auc, 'eer': eer, 'ap': ap} + # we dont compute the video-level metrics for training + self.video_names = [] + return metric_batch_dict + + def forward(self, data_dict: dict, inference=False) -> dict: + # get the features by backbone + features = self.features(data_dict) + # get the prediction by classifier + pred = self.classifier(features) + # get the probability of the pred + prob = torch.softmax(pred, dim=1)[:, 1] + # build the prediction dict for each output + pred_dict = {'cls': pred, 'prob': prob, 'feat': features} + return pred_dict diff --git a/training/lib/component/MCT/template0.png 
b/training/lib/component/MCT/template0.png new file mode 100644 index 0000000000000000000000000000000000000000..bc2b450c2d80980c30aebac10201bda91e9d0e02 Binary files /dev/null and b/training/lib/component/MCT/template0.png differ diff --git a/training/lib/component/MCT/template1.png b/training/lib/component/MCT/template1.png new file mode 100644 index 0000000000000000000000000000000000000000..ce86a7f4b5b045f3222d43414c8a3a2c0d189a11 Binary files /dev/null and b/training/lib/component/MCT/template1.png differ diff --git a/training/lib/component/MCT/template2.png b/training/lib/component/MCT/template2.png new file mode 100644 index 0000000000000000000000000000000000000000..4917a2ea0b39dec9ac5f1233b0c7c58d04b681ca Binary files /dev/null and b/training/lib/component/MCT/template2.png differ diff --git a/training/lib/component/MCT/template3.png b/training/lib/component/MCT/template3.png new file mode 100644 index 0000000000000000000000000000000000000000..03d3fd0c66ee60474397592feea9835ffe51270a Binary files /dev/null and b/training/lib/component/MCT/template3.png differ diff --git a/training/lib/component/MCT/template4.png b/training/lib/component/MCT/template4.png new file mode 100644 index 0000000000000000000000000000000000000000..44701c0a389cf0fa24c3ad770fdecac87317ce96 Binary files /dev/null and b/training/lib/component/MCT/template4.png differ diff --git a/training/lib/component/MCT/template5.png b/training/lib/component/MCT/template5.png new file mode 100644 index 0000000000000000000000000000000000000000..13e3ee65ad94c455dcadc1f0c75414ee8da9cd6c Binary files /dev/null and b/training/lib/component/MCT/template5.png differ diff --git a/training/lib/component/MCT/template6.png b/training/lib/component/MCT/template6.png new file mode 100644 index 0000000000000000000000000000000000000000..dd239d79c99e43be0cdbe0c58b75b03079ee47e8 Binary files /dev/null and b/training/lib/component/MCT/template6.png differ diff --git a/training/lib/component/MCT/template7.png b/training/lib/component/MCT/template7.png new file mode 100644 index 0000000000000000000000000000000000000000..ee4d7f7d8007319ed756b540af15247d801042c6 Binary files /dev/null and b/training/lib/component/MCT/template7.png differ diff --git a/training/lib/component/MCT/template8.png b/training/lib/component/MCT/template8.png new file mode 100644 index 0000000000000000000000000000000000000000..0421aed95af01c9dd6d4cd6c2ff58ac70dba9db1 Binary files /dev/null and b/training/lib/component/MCT/template8.png differ diff --git a/training/lib/component/MCT/template9.png b/training/lib/component/MCT/template9.png new file mode 100644 index 0000000000000000000000000000000000000000..92554ac8cc1eb0bd6be5ea6a85f29ab43febe5d5 Binary files /dev/null and b/training/lib/component/MCT/template9.png differ diff --git a/training/lib/component/__init__.py b/training/lib/component/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..676145d777810e4a51bdaf59fdec4f5358aae349 --- /dev/null +++ b/training/lib/component/__init__.py @@ -0,0 +1,7 @@ +import os +import sys +current_file_path = os.path.abspath(__file__) +parent_dir = os.path.dirname(os.path.dirname(current_file_path)) +project_root_dir = os.path.dirname(parent_dir) +sys.path.append(parent_dir) +sys.path.append(project_root_dir) diff --git a/training/lib/component/attention.py b/training/lib/component/attention.py new file mode 100644 index 0000000000000000000000000000000000000000..f74bd67c0364812ffec0d40c0235062a80f206ad --- /dev/null +++ b/training/lib/component/attention.py 
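A quick note on how the detector defined in training/detectors/xception_detector.py above is typically driven: it is looked up in the DETECTOR registry under the name 'xception', built from a config dict that must include backbone_name, backbone_config, pretrained and loss_func (with pretrained pointing at a valid Xception checkpoint), and its forward returns a dict with 'cls', 'prob' and 'feat'. The sketch below is a minimal, hypothetical usage example; the config path and the assumption that the training/ directory is on sys.path are not guaranteed by this diff.

import yaml
import torch
from detectors import DETECTOR  # resolves when 'training/' is on sys.path, as in these modules

# Hypothetical path; adjust to wherever the detector config actually lives.
with open("config/detector/xception.yaml") as f:
    config = yaml.safe_load(f)

model = DETECTOR["xception"](config).eval()      # build_backbone() loads config['pretrained'] internally

batch = {"image": torch.randn(4, 3, 256, 256)}   # forward() only reads the 'image' entry
with torch.no_grad():
    out = model(batch, inference=True)

print(out["cls"].shape)   # logits, [4, num_classes]
print(out["prob"].shape)  # probability of the 'fake' class, [4]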
@@ -0,0 +1,350 @@ + +import math +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class ChannelAttention(nn.Module): + def __init__(self, in_planes, ratio=8): + super(ChannelAttention, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.max_pool = nn.AdaptiveMaxPool2d(1) + + self.sharedMLP = nn.Sequential( + nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False), + nn.ReLU(), + nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False)) + self.sigmoid = nn.Sigmoid() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.xavier_normal_(m.weight.data, gain=0.02) + + def forward(self, x): + avgout = self.sharedMLP(self.avg_pool(x)) + maxout = self.sharedMLP(self.max_pool(x)) + return self.sigmoid(avgout + maxout) + + +class SpatialAttention(nn.Module): + def __init__(self, kernel_size=7): + super(SpatialAttention, self).__init__() + assert kernel_size in (3, 7), "kernel size must be 3 or 7" + padding = 3 if kernel_size == 7 else 1 + + self.conv = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False) + self.sigmoid = nn.Sigmoid() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.xavier_normal_(m.weight.data, gain=0.02) + + def forward(self, x): + avgout = torch.mean(x, dim=1, keepdim=True) + maxout, _ = torch.max(x, dim=1, keepdim=True) + x = torch.cat([avgout, maxout], dim=1) + x = self.conv(x) + return self.sigmoid(x) + + +class Self_Attn(nn.Module): + """ Self attention Layer""" + + def __init__(self, in_dim, out_dim=None, add=False, ratio=8): + super(Self_Attn, self).__init__() + self.chanel_in = in_dim + self.add = add + if out_dim is None: + out_dim = in_dim + self.out_dim = out_dim + # self.activation = activation + + self.query_conv = nn.Conv2d( + in_channels=in_dim, out_channels=in_dim//ratio, kernel_size=1) + self.key_conv = nn.Conv2d( + in_channels=in_dim, out_channels=in_dim//ratio, kernel_size=1) + self.value_conv = nn.Conv2d( + in_channels=in_dim, out_channels=out_dim, kernel_size=1) + self.gamma = nn.Parameter(torch.zeros(1)) + + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x): + """ + inputs : + x : input feature maps( B X C X W X H) + returns : + out : self attention value + input feature + attention: B X N X N (N is Width*Height) + """ + m_batchsize, C, width, height = x.size() + proj_query = self.query_conv(x).view( + m_batchsize, -1, width*height).permute(0, 2, 1) # B X C X(N) + proj_key = self.key_conv(x).view( + m_batchsize, -1, width*height) # B X C x (*W*H) + energy = torch.bmm(proj_query, proj_key) # transpose check + attention = self.softmax(energy) # BX (N) X (N) + proj_value = self.value_conv(x).view( + m_batchsize, -1, width*height) # B X C X N + + out = torch.bmm(proj_value, attention.permute(0, 2, 1)) + out = out.view(m_batchsize, self.out_dim, width, height) + + if self.add: + out = self.gamma*out + x + else: + out = self.gamma*out + return out # , attention + + +class CrossModalAttention(nn.Module): + """ CMA attention Layer""" + + def __init__(self, in_dim, activation=None, ratio=8, cross_value=True): + super(CrossModalAttention, self).__init__() + self.chanel_in = in_dim + self.activation = activation + self.cross_value = cross_value + + self.query_conv = nn.Conv2d( + in_channels=in_dim, out_channels=in_dim//ratio, kernel_size=1) + self.key_conv = nn.Conv2d( + in_channels=in_dim, out_channels=in_dim//ratio, kernel_size=1) + self.value_conv = nn.Conv2d( + in_channels=in_dim, out_channels=in_dim, kernel_size=1) + self.gamma = nn.Parameter(torch.zeros(1)) + + self.softmax = 
nn.Softmax(dim=-1) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.xavier_normal_(m.weight.data, gain=0.02) + + def forward(self, x, y): + """ + inputs : + x : input feature maps( B X C X W X H) + returns : + out : self attention value + input feature + attention: B X N X N (N is Width*Height) + """ + B, C, H, W = x.size() + + proj_query = self.query_conv(x).view( + B, -1, H*W).permute(0, 2, 1) # B , HW, C + proj_key = self.key_conv(y).view( + B, -1, H*W) # B X C x (*W*H) + energy = torch.bmm(proj_query, proj_key) # B, HW, HW + attention = self.softmax(energy) # BX (N) X (N) + if self.cross_value: + proj_value = self.value_conv(y).view( + B, -1, H*W) # B , C , HW + else: + proj_value = self.value_conv(x).view( + B, -1, H*W) # B , C , HW + + out = torch.bmm(proj_value, attention.permute(0, 2, 1)) + out = out.view(B, C, H, W) + + out = self.gamma*out + x + + if self.activation is not None: + out = self.activation(out) + + return out # , attention + +class DualCrossModalAttention(nn.Module): + """ Dual CMA attention Layer""" + + def __init__(self, in_dim, activation=None, size=16, ratio=8, ret_att=False): + super(DualCrossModalAttention, self).__init__() + self.chanel_in = in_dim + self.activation = activation + self.ret_att = ret_att + + # query conv + self.key_conv1 = nn.Conv2d( + in_channels=in_dim, out_channels=in_dim//ratio, kernel_size=1) + self.key_conv2 = nn.Conv2d( + in_channels=in_dim, out_channels=in_dim//ratio, kernel_size=1) + self.key_conv_share = nn.Conv2d( + in_channels=in_dim//ratio, out_channels=in_dim//ratio, kernel_size=1) + + self.linear1 = nn.Linear(size*size, size*size) + self.linear2 = nn.Linear(size*size, size*size) + + # separated value conv + self.value_conv1 = nn.Conv2d( + in_channels=in_dim, out_channels=in_dim, kernel_size=1) + self.gamma1 = nn.Parameter(torch.zeros(1)) + + self.value_conv2 = nn.Conv2d( + in_channels=in_dim, out_channels=in_dim, kernel_size=1) + self.gamma2 = nn.Parameter(torch.zeros(1)) + + self.softmax = nn.Softmax(dim=-1) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.xavier_normal_(m.weight.data, gain=0.02) + if isinstance(m, nn.Linear): + nn.init.xavier_normal_(m.weight.data, gain=0.02) + + def forward(self, x, y): + """ + inputs : + x : input feature maps( B X C X W X H) + returns : + out : self attention value + input feature + attention: B X N X N (N is Width*Height) + """ + B, C, H, W = x.size() + + def _get_att(a, b): + proj_key1 = self.key_conv_share(self.key_conv1(a)).view( + B, -1, H*W).permute(0, 2, 1) # B , HW, C + proj_key2 = self.key_conv_share(self.key_conv2(b)).view( + B, -1, H*W) # B X C x (*W*H) + #print('proj_key1:', proj_key1[0][0][:5].cpu().detach().numpy()) + #print('proj_key2:', proj_key2[0][:5][0:5].cpu().detach().numpy()) + energy = torch.bmm(proj_key1, proj_key2) # B, HW, HW + #print('energy:', energy[0][0][:5].cpu().detach().numpy()) + attention1 = self.softmax(self.linear1(energy)) + attention2 = self.softmax(self.linear2(energy.permute(0,2,1))) # BX (N) X (N) + #print('1:', attention1[0]==attention1[1]) + #print('2:', attention2[0]==attention2[1]) + + return attention1, attention2 + + att_y_on_x, att_x_on_y = _get_att(x, y) + #print('att_y_on_x:', att_y_on_x[0][0][:5].cpu().detach().numpy()) + proj_value_y_on_x = self.value_conv2(y).view( + B, -1, H*W) # B , C , HW + out_y_on_x = torch.bmm(proj_value_y_on_x, att_y_on_x.permute(0, 2, 1)) + out_y_on_x = out_y_on_x.view(B, C, H, W) + out_x = self.gamma1*out_y_on_x + x + + proj_value_x_on_y = self.value_conv1(x).view( + B, 
-1, H*W) # B , C , HW + out_x_on_y = torch.bmm(proj_value_x_on_y, att_x_on_y.permute(0, 2, 1)) + out_x_on_y = out_x_on_y.view(B, C, H, W) + out_y = self.gamma2*out_x_on_y + y + + if self.ret_att: + return out_x, out_y, att_y_on_x, att_x_on_y + + return out_x, out_y # , attention + +class DualCrossModalAttention_old(nn.Module): + """ Dual CMA attention Layer""" + + def __init__(self, in_dim, activation=None, ratio=8, ret_att=False): + super(DualCrossModalAttention_old, self).__init__() + self.chanel_in = in_dim + self.activation = activation + self.ret_att = ret_att + + # shared query & key conv + self.query_conv = nn.Conv2d( + in_channels=in_dim, out_channels=in_dim//ratio, kernel_size=1) + self.key_conv = nn.Conv2d( + in_channels=in_dim, out_channels=in_dim//ratio, kernel_size=1) + + # separated value conv + self.value_conv1 = nn.Conv2d( + in_channels=in_dim, out_channels=in_dim, kernel_size=1) + self.gamma1 = nn.Parameter(torch.zeros(1)) + + self.value_conv2 = nn.Conv2d( + in_channels=in_dim, out_channels=in_dim, kernel_size=1) + self.gamma2 = nn.Parameter(torch.zeros(1)) + + self.softmax = nn.Softmax(dim=-1) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.xavier_normal_(m.weight.data, gain=0.02) + + def forward(self, x, y): + """ + inputs : + x : input feature maps( B X C X W X H) + returns : + out : self attention value + input feature + attention: B X N X N (N is Width*Height) + """ + B, C, H, W = x.size() + + def _get_att(q, k): + proj_query = self.query_conv(q).view( + B, -1, H*W).permute(0, 2, 1) # B , HW, C + proj_key = self.key_conv(k).view( + B, -1, H*W) # B X C x (*W*H) + #print('proj_key:', proj_key[0][0][:5].cpu().detach().numpy()) + energy = torch.bmm(proj_query, proj_key) # B, HW, HW + #print('energy:', energy[0][0][:5].cpu().detach().numpy()) + attention = self.softmax(energy) # BX (N) X (N) + + return attention + + att_y_on_x = _get_att(x, y) + #print('att_y_on_x:', att_y_on_x[0][0][:5].cpu().detach().numpy()) + proj_value_y_on_x = self.value_conv2(y).view( + B, -1, H*W) # B , C , HW + out_y_on_x = torch.bmm(proj_value_y_on_x, att_y_on_x.permute(0, 2, 1)) + out_y_on_x = out_y_on_x.view(B, C, H, W) + out_x = self.gamma1*out_y_on_x + x + + att_x_on_y = _get_att(y, x) + proj_value_x_on_y = self.value_conv1(x).view( + B, -1, H*W) # B , C , HW + out_x_on_y = torch.bmm(proj_value_x_on_y, att_x_on_y.permute(0, 2, 1)) + out_x_on_y = out_x_on_y.view(B, C, H, W) + out_y = self.gamma2*out_x_on_y + y + + if self.ret_att: + return out_x, out_y, att_y_on_x, att_x_on_y + + return out_x, out_y # , attention + + + + +''' +class BasicBlock(nn.Module): + expansion = 1 + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.ca = ChannelAttention(planes) + self.sa = SpatialAttention() + self.downsample = downsample + self.stride = stride + def forward(self, x): + residual = x + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + out = self.conv2(out) + out = self.bn2(out) + out = self.ca(out) * out # 广播机制 + out = self.sa(out) * out # 广播机制 + if self.downsample is not None: + residual = self.downsample(x) + out += residual + out = self.relu(out) + return out +''' + +if __name__ == "__main__": + x = torch.rand(10, 768, 16, 16) + y = torch.rand(10, 768, 16, 16) + dcma = DualCrossModalAttention(768, ret_att=True) + 
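+    # Shape check: with 16x16 feature maps H*W = 256, so the returned attention
+    # maps att_y_on_x / att_x_on_y are (10, 256, 256), while out_x / out_y keep
+    # the input shape (10, 768, 16, 16).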
out_x, out_y, att_y_on_x, att_x_on_y = dcma(x, y) + print(out_y.size()) + print(att_x_on_y.size()) diff --git a/training/lib/component/gaussian_ops.py b/training/lib/component/gaussian_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..57a7dda967810210c2f2ffc2c9ae1b54dca82bcd --- /dev/null +++ b/training/lib/component/gaussian_ops.py @@ -0,0 +1,117 @@ +import cv2 +import numpy as np +import math +import numbers +import torch +from torch import nn +from torch.nn import functional as F + + +class GaussianSmoothing(nn.Module): + """ + Apply gaussian smoothing on a + 1d, 2d or 3d tensor. Filtering is performed seperately for each channel + in the input using a depthwise convolution. + Arguments: + channels (int, sequence): Number of channels of the input tensors. Output will + have this number of channels as well. + kernel_size (int, sequence): Size of the gaussian kernel. + sigma (float, sequence): Standard deviation of the gaussian kernel. + dim (int, optional): The number of dimensions of the data. + Default value is 2 (spatial). + """ + + def __init__(self, channels, kernel_size, sigma=0.1, dim=2): + super(GaussianSmoothing, self).__init__() + self.kernel_size = kernel_size + if isinstance(kernel_size, numbers.Number): + kernel_size = [kernel_size] * dim + if isinstance(sigma, numbers.Number): + sigma = [sigma] * dim + + # The gaussian kernel is the product of the + # gaussian function of each dimension. + kernel = 1 + meshgrids = torch.meshgrid( + [ + torch.arange(size, dtype=torch.float32) + for size in kernel_size + ] + ) + for size, std, mgrid in zip(kernel_size, sigma, meshgrids): + mean = (size - 1) / 2 + kernel *= 1 / (std * math.sqrt(2 * math.pi)) * \ + torch.exp(-((mgrid - mean) / std) ** 2 / 2) + + # Make sure sum of values in gaussian kernel equals 1. + kernel = kernel / torch.sum(kernel) + + # Reshape to depthwise convolutional weight + kernel = kernel.view(1, 1, *kernel.size()) + kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1)) + + self.register_buffer('weight', kernel) + self.groups = channels + + if dim == 1: + self.conv = F.conv1d + elif dim == 2: + self.conv = F.conv2d + elif dim == 3: + self.conv = F.conv3d + else: + raise RuntimeError( + 'Only 1, 2 and 3 dimensions are supported. Received {}.'.format( + dim) + ) + + def forward(self, input): + """ + Apply gaussian filter to input. + Arguments: + input (torch.Tensor): Input to apply gaussian filter on. + Returns: + filtered (torch.Tensor): Filtered output. 
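+            Note: the filter is only applied while the module is in training
+            mode; in eval mode the input is returned unchanged (see the
+            ``self.training`` branch below).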
+ """ + if self.training: + return self.conv(input, weight=self.weight, groups=self.groups, padding=self.kernel_size//2) + else: + return input + +class GaussianNoise(nn.Module): + def __init__(self, mean=0, std=0.1, clip=1): + super(GaussianNoise, self).__init__() + self.mean = mean + self.std = std + self.clip = clip + + def forward(self, x): + if self.training: + noise = x.data.new(x.size()).normal_(self.mean, self.std) + return torch.clamp(x + noise, -self.clip, self.clip) + else: + return x + + +if __name__ == "__main__": + im = cv2.imread('E:\SRM\component\FF-F2F_0.png') + im_ten = im/255*2-1 + im_ten = torch.from_numpy(im_ten).unsqueeze(0).permute(0, 3, 1, 2).float() + blur = GaussianSmoothing(channels=3, kernel_size=7, sigma=0.8) + noise = GaussianNoise() + + noise_im = torch.clamp(noise(im_ten), -1, 1) + blur_im = blur(im_ten) + print(blur_im.size()) + + def t2im(t): + + t = (t+1)/2*255 + im = t.squeeze().cpu().numpy().transpose(1, 2, 0).astype(np.uint8) + return im + + cv2.imshow('ori', im) + cv2.imshow('blur', t2im(blur_im)) + cv2.imshow('noise', t2im(noise_im)) + + cv2.waitKey() diff --git a/training/lib/component/srm_conv.py b/training/lib/component/srm_conv.py new file mode 100644 index 0000000000000000000000000000000000000000..2d60a41fd63fca643fae525ec2f6d3ed001b75d2 --- /dev/null +++ b/training/lib/component/srm_conv.py @@ -0,0 +1,194 @@ +# -------------------------------------------------------- +# Two Stream Faster R-CNN +# Licensed under The MIT License [see LICENSE for details] +# Written by Hangyan Jiang +# -------------------------------------------------------- + +# Testing part +import torch +import torch.nn as nn +import torch.nn.functional as F +import cv2 +from PIL import Image +import numpy as np +import matplotlib.pyplot as plt + +import argparse + + +class SRMConv2d(nn.Module): + + def __init__(self, learnable=False): + super(SRMConv2d, self).__init__() + self.weight = nn.Parameter(torch.Tensor(30, 3, 5, 5), + requires_grad=learnable) + self.bias = nn.Parameter(torch.Tensor(30), \ + requires_grad=learnable) + self.reset_parameters() + + def reset_parameters(self): + SRM_npy = np.load('lib/component/SRM_Kernels.npy') + # print(SRM_npy.shape) + SRM_npy = np.repeat(SRM_npy, 3, axis=1) + # print(SRM_npy.shape) + self.weight.data.numpy()[:] = SRM_npy + self.bias.data.zero_() + + def forward(self, input): + return F.conv2d(input, self.weight, stride=1, padding=2) + + + +class SRMConv2d_simple(nn.Module): + + def __init__(self, inc=3, learnable=False): + super(SRMConv2d_simple, self).__init__() + self.truc = nn.Hardtanh(-3, 3) + kernel = self._build_kernel(inc) # (3,3,5,5) + self.kernel = nn.Parameter(data=kernel, requires_grad=learnable) + # self.hor_kernel = self._build_kernel().transpose(0,1,3,2) + + def forward(self, x): + ''' + x: imgs (Batch, H, W, 3) + ''' + out = F.conv2d(x, self.kernel, stride=1, padding=2) + out = self.truc(out) + + return out + + def _build_kernel(self, inc): + # filter1: KB + filter1 = [[0, 0, 0, 0, 0], + [0, -1, 2, -1, 0], + [0, 2, -4, 2, 0], + [0, -1, 2, -1, 0], + [0, 0, 0, 0, 0]] + # filter2:KV + filter2 = [[-1, 2, -2, 2, -1], + [2, -6, 8, -6, 2], + [-2, 8, -12, 8, -2], + [2, -6, 8, -6, 2], + [-1, 2, -2, 2, -1]] + # # filter3:hor 2rd + filter3 = [[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 1, -2, 1, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]] + # filter3:hor 2rd + # filter3 = [[0, 0, 0, 0, 0], + # [0, 0, 1, 0, 0], + # [0, 1, -4, 1, 0], + # [0, 0, 1, 0, 0], + # [0, 0, 0, 0, 0]] + + filter1 = np.asarray(filter1, dtype=float) / 4. 
+ filter2 = np.asarray(filter2, dtype=float) / 12. + filter3 = np.asarray(filter3, dtype=float) / 2. + # statck the filters + filters = [[filter1],#, filter1, filter1], + [filter2],#, filter2, filter2], + [filter3]]#, filter3, filter3]] # (3,3,5,5) + filters = np.array(filters) + filters = np.repeat(filters, inc, axis=1) + filters = torch.FloatTensor(filters) # (3,3,5,5) + return filters + +class SRMConv2d_Separate(nn.Module): + + def __init__(self, inc, outc, learnable=False): + super(SRMConv2d_Separate, self).__init__() + self.inc = inc + self.truc = nn.Hardtanh(-3, 3) + kernel = self._build_kernel(inc) # (3,3,5,5) + self.kernel = nn.Parameter(data=kernel, requires_grad=learnable) + # self.hor_kernel = self._build_kernel().transpose(0,1,3,2) + self.out_conv = nn.Sequential( + nn.Conv2d(3*inc, outc, 1, 1, 0, 1, 1, bias=False), + nn.BatchNorm2d(outc), + nn.ReLU(inplace=True) + ) + + for ly in self.out_conv.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + + def forward(self, x): + ''' + x: imgs (Batch,inc, H, W) + kernel: (outc,inc,kH,kW) + ''' + out = F.conv2d(x, self.kernel, stride=1, padding=2, groups=self.inc) + out = self.truc(out) + out = self.out_conv(out) + + return out + + def _build_kernel(self, inc): + # filter1: KB + filter1 = [[0, 0, 0, 0, 0], + [0, -1, 2, -1, 0], + [0, 2, -4, 2, 0], + [0, -1, 2, -1, 0], + [0, 0, 0, 0, 0]] + # filter2:KV + filter2 = [[-1, 2, -2, 2, -1], + [2, -6, 8, -6, 2], + [-2, 8, -12, 8, -2], + [2, -6, 8, -6, 2], + [-1, 2, -2, 2, -1]] + # # filter3:hor 2rd + filter3 = [[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 1, -2, 1, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]] + # filter3:hor 2rd + # filter3 = [[0, 0, 0, 0, 0], + # [0, 0, 1, 0, 0], + # [0, 1, -4, 1, 0], + # [0, 0, 1, 0, 0], + # [0, 0, 0, 0, 0]] + + filter1 = np.asarray(filter1, dtype=float) / 4. + filter2 = np.asarray(filter2, dtype=float) / 12. + filter3 = np.asarray(filter3, dtype=float) / 2. 
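+        # Unlike SRMConv2d_simple, the three base kernels are repeated along the
+        # output-channel axis (axis=0) instead of the input-channel axis, giving a
+        # (3*inc, 1, 5, 5) weight for the grouped convolution (groups=self.inc)
+        # used in forward().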
+ # statck the filters + filters = [[filter1],#, filter1, filter1], + [filter2],#, filter2, filter2], + [filter3]]#, filter3, filter3]] # (3,3,5,5) => (3,1,5,5) + filters = np.array(filters) + # filters = np.repeat(filters, inc, axis=1) + filters = np.repeat(filters, inc, axis=0) + filters = torch.FloatTensor(filters) # (3*inc,1,5,5) + # print(filters.size()) + return filters + + +if __name__ == "__main__": + im = cv2.imread('E:\SRM\component\FF-F2F_0.png') + im_ten = im/255*2-1 + im_ten = torch.from_numpy(im_ten).unsqueeze(0).permute(0, 3, 1, 2).float() + # im_ten = torch.cat((im_ten, im_ten), dim=1) + srm_conv = SRMConv2d_simple(inc=3) + srm_conv1 = SRMConv2d_Separate(inc=3, outc=3) + + srm = srm_conv(im_ten) + print(srm.size()) + + def t2im(t): + + # t = (t+1)/2*255 + t = t*255 + im = t.squeeze().detach().cpu().numpy().transpose(1, 2, 0).astype(np.uint8) + return im + + cv2.imshow('ori', im) + cv2.imshow('srm', t2im(srm)) + cv2.imshow('srm1', t2im(srm_conv1(im_ten))) + # cv2.imshow('srm2', t2im(srm_conv(srm))) + + cv2.waitKey() + + diff --git a/training/logger.py b/training/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..9ee268d4874d1be51fc6e21b4e80f3b9f6e6a0f5 --- /dev/null +++ b/training/logger.py @@ -0,0 +1,36 @@ +import os +import logging + +import torch.distributed as dist + +class RankFilter(logging.Filter): + def __init__(self, rank): + super().__init__() + self.rank = rank + + def filter(self, record): + return dist.get_rank() == self.rank + +def create_logger(log_path): + # Create log path + if os.path.isdir(os.path.dirname(log_path)): + os.makedirs(os.path.dirname(log_path), exist_ok=True) + + # Create logger object + logger = logging.getLogger() + logger.setLevel(logging.INFO) + # Create file handler and set the formatter + fh = logging.FileHandler(log_path) + formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') + fh.setFormatter(formatter) + + # Add the file handler to the logger + logger.addHandler(fh) + + # Add a stream handler to print to console + sh = logging.StreamHandler() + sh.setLevel(logging.INFO) # Set logging level for stream handler + sh.setFormatter(formatter) + logger.addHandler(sh) + + return logger \ No newline at end of file diff --git a/training/loss/__init__.py b/training/loss/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..89507351529fe505515e81c1f48a44eca09c437a --- /dev/null +++ b/training/loss/__init__.py @@ -0,0 +1,24 @@ +import os +import sys +current_file_path = os.path.abspath(__file__) +parent_dir = os.path.dirname(os.path.dirname(current_file_path)) +project_root_dir = os.path.dirname(parent_dir) +sys.path.append(parent_dir) +sys.path.append(project_root_dir) + +from metrics.registry import LOSSFUNC + +from .cross_entropy_loss import CrossEntropyLoss +from .consistency_loss import ConsistencyCos +from .capsule_loss import CapsuleLoss +from .bce_loss import BCELoss +from .am_softmax import AMSoftmaxLoss +from .am_softmax import AMSoftmax_OHEM +from .contrastive_regularization import ContrastiveLoss +from .l1_loss import L1Loss +from .id_loss import IDLoss +from .vgg_loss import VGGLoss +from .js_loss import JS_Loss +from .patch_consistency_loss import PatchConsistencyLoss +from .region_independent_loss import RegionIndependentLoss +from .supercontrast_loss import SupConLoss diff --git a/training/loss/abstract_loss_func.py b/training/loss/abstract_loss_func.py new file mode 100644 index 
0000000000000000000000000000000000000000..45d3324ed53be4310867b326e9eaabd265634138 --- /dev/null +++ b/training/loss/abstract_loss_func.py @@ -0,0 +1,17 @@ +import torch.nn as nn + +class AbstractLossClass(nn.Module): + """Abstract class for loss functions.""" + def __init__(self): + super(AbstractLossClass, self).__init__() + + def forward(self, pred, label): + """ + Args: + pred: prediction of the model + label: ground truth label + + Return: + loss: loss value + """ + raise NotImplementedError('Each subclass should implement the forward method.') diff --git a/training/loss/am_softmax.py b/training/loss/am_softmax.py new file mode 100644 index 0000000000000000000000000000000000000000..88b1df5236d19ecfddea8b1bc377733ff3aa6195 --- /dev/null +++ b/training/loss/am_softmax.py @@ -0,0 +1,145 @@ +""" + Copyright (c) 2018 Intel Corporation + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +""" + +import math +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn import Parameter +import torch as th + +from .abstract_loss_func import AbstractLossClass +from metrics.registry import LOSSFUNC + + +#------------ AMSoftmax Loss ---------------------- + +def focal_loss(input_values, gamma): + """Computes the focal loss""" + p = torch.exp(-input_values) + loss = (1 - p) ** gamma * input_values + return loss.mean() + + +@LOSSFUNC.register_module(module_name="am_softmax") +class AMSoftmaxLoss(AbstractLossClass): + """Computes the AM-Softmax loss with cos or arc margin""" + margin_types = ['cos', 'arc'] + + def __init__(self, margin_type='cos', gamma=0., m=0.5, s=30, t=1.): + super().__init__() + assert margin_type in AMSoftmaxLoss.margin_types + self.margin_type = margin_type + assert gamma >= 0 + self.gamma = gamma + assert m > 0 + self.m = m + assert s > 0 + self.s = s + self.cos_m = math.cos(m) + self.sin_m = math.sin(m) + self.th = math.cos(math.pi - m) + assert t >= 1 + self.t = t + + def forward(self, cos_theta, target): + if self.margin_type == 'cos': + phi_theta = cos_theta - self.m + else: + sine = torch.sqrt(1.0 - torch.pow(cos_theta, 2)) + phi_theta = cos_theta * self.cos_m - sine * self.sin_m #cos(theta+m) + phi_theta = torch.where(cos_theta > self.th, phi_theta, cos_theta - self.sin_m * self.m) + + index = torch.zeros_like(cos_theta, dtype=torch.uint8) + index.scatter_(1, target.data.view(-1, 1), 1) + output = torch.where(index, phi_theta, cos_theta) + + if self.gamma == 0 and self.t == 1.: + return F.cross_entropy(self.s*output, target) + + if self.t > 1: + h_theta = self.t - 1 + self.t*cos_theta + support_vecs_mask = (1 - index) * \ + torch.lt(torch.masked_select(phi_theta, index).view(-1, 1).repeat(1, h_theta.shape[1]) - cos_theta, 0) + output = torch.where(support_vecs_mask, h_theta, output) + return F.cross_entropy(self.s*output, target) + + return focal_loss(F.cross_entropy(self.s*output, target, reduction='none'), self.gamma) + + +@LOSSFUNC.register_module(module_name="am_softmax_ohem") +class AMSoftmax_OHEM(AbstractLossClass): + """Computes the AM-Softmax loss with cos or arc 
margin""" + margin_types = ['cos', 'arc'] + + def __init__(self, margin_type='cos', gamma=0., m=0.5, s=30, t=1., ratio=1.): + super(self).__init__() + assert margin_type in AMSoftmaxLoss.margin_types + self.margin_type = margin_type + assert gamma >= 0 + self.gamma = gamma + assert m > 0 + self.m = m + assert s > 0 + self.s = s + self.cos_m = math.cos(m) + self.sin_m = math.sin(m) + self.th = math.cos(math.pi - m) + assert t >= 1 + self.t = t + self.ratio = ratio + + + # ------- online hard example mining -------------------- + def get_subidx(self,x,y,ratio): + num_inst = x.size(0) + num_hns = int(ratio * num_inst) + x_ = x.clone() + inst_losses = th.autograd.Variable(th.zeros(num_inst)).cuda() + + for idx, label in enumerate(y.data): + inst_losses[idx] = -x_.data[idx, label] + + _, idxs = inst_losses.topk(num_hns) + return idxs + + + def forward(self, cos_theta, target): + if self.margin_type == 'cos': + phi_theta = cos_theta - self.m + else: + sine = torch.sqrt(1.0 - torch.pow(cos_theta, 2)) + phi_theta = cos_theta * self.cos_m - sine * self.sin_m #cos(theta+m) + phi_theta = torch.where(cos_theta > self.th, phi_theta, cos_theta - self.sin_m * self.m) + + index = torch.zeros_like(cos_theta, dtype=torch.uint8) + index.scatter_(1, target.data.view(-1, 1), 1) + output = torch.where(index, phi_theta, cos_theta) + + out = F.log_softmax(output,dim=1) + idxs = self.get_subidx(out,target,self.ratio) # select hard examples + + output2 = output.index_select(0, idxs) + target2 = target.index_select(0, idxs) + + if self.gamma == 0 and self.t == 1.: + return F.cross_entropy(self.s*output2, target2) + + if self.t > 1: + h_theta = self.t - 1 + self.t*cos_theta + support_vecs_mask = (1 - index) * \ + torch.lt(torch.masked_select(phi_theta, index).view(-1, 1).repeat(1, h_theta.shape[1]) - cos_theta, 0) + output2 = torch.where(support_vecs_mask, h_theta, output2) + return F.cross_entropy(self.s*output2, target2) + + return focal_loss(F.cross_entropy(self.s*output2, target2, reduction='none'), self.gamma) \ No newline at end of file diff --git a/training/loss/bce_loss.py b/training/loss/bce_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..3641878fbe109fb4247f60d683401ce76a797bf7 --- /dev/null +++ b/training/loss/bce_loss.py @@ -0,0 +1,26 @@ +import torch.nn as nn +from .abstract_loss_func import AbstractLossClass +from metrics.registry import LOSSFUNC + + +@LOSSFUNC.register_module(module_name="bce") +class BCELoss(AbstractLossClass): + def __init__(self): + super().__init__() + self.loss_fn = nn.BCELoss() + + def forward(self, inputs, targets): + """ + Computes the bce loss. + + Args: + inputs: A PyTorch tensor of size (batch_size, num_classes) containing the predicted scores. + targets: A PyTorch tensor of size (batch_size) containing the ground-truth class indices. + + Returns: + A scalar tensor representing the bce loss. 
+ """ + # Compute the bce loss + loss = self.loss_fn(inputs, targets.float()) + + return loss \ No newline at end of file diff --git a/training/loss/capsule_loss.py b/training/loss/capsule_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..df13bb3b768ec56ae4117fe58199461f295618b5 --- /dev/null +++ b/training/loss/capsule_loss.py @@ -0,0 +1,28 @@ +import torch.nn as nn +from .abstract_loss_func import AbstractLossClass +from metrics.registry import LOSSFUNC + + +@LOSSFUNC.register_module(module_name="capsule_loss") +class CapsuleLoss(AbstractLossClass): + def __init__(self): + super().__init__() + self.cross_entropy_loss = nn.CrossEntropyLoss() + + def forward(self, inputs, targets): + """ + Computes the capsule loss. + + Args: + inputs: A PyTorch tensor of size (batch_size, num_classes) containing the predicted scores. + targets: A PyTorch tensor of size (batch_size) containing the ground-truth class indices. + + Returns: + A scalar tensor representing the capsule loss. + """ + # Compute the capsule loss + loss_t = self.cross_entropy_loss(inputs[:,0,:], targets) + + for i in range(inputs.size(1) - 1): + loss_t = loss_t + self.cross_entropy_loss(inputs[:,i+1,:], targets) + return loss_t diff --git a/training/loss/consistency_loss.py b/training/loss/consistency_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..89a7ce69ef5075f6260d5371cf280577a55c2691 --- /dev/null +++ b/training/loss/consistency_loss.py @@ -0,0 +1,54 @@ +import torch.nn as nn +import torch +from .abstract_loss_func import AbstractLossClass +from metrics.registry import LOSSFUNC + + +@LOSSFUNC.register_module(module_name="consistency_loss") +class ConsistencyCos(nn.Module): + def __init__(self): + super(ConsistencyCos, self).__init__() + # # CrossEntropy Loss + # weight=torch.Tensor([4.0, 1.0]) + # if torch.cuda.is_available(): + # weight = weight.cuda() + # self.loss_fn = nn.CrossEntropyLoss(weight) + self.loss_fn = nn.CrossEntropyLoss() + self.mse_fn = nn.MSELoss() + + def forward(self, feat, inputs, targets): + feat = nn.functional.normalize(feat, dim=1) + feat_0 = feat[:int(feat.size(0)/2),:] + feat_1 = feat[int(feat.size(0)/2): 2*int(feat.size(0)/2),:] + + cos = torch.einsum('nc,nc->n', [feat_0, feat_1]).unsqueeze(-1) + labels = torch.ones((cos.shape[0],1), dtype=torch.float, requires_grad=False) + if torch.cuda.is_available(): + labels = labels.cuda() + self.consistency_rate = 1.0 + loss = self.consistency_rate * self.mse_fn(cos, labels) + self.loss_fn(inputs, targets) + return loss + +# +##FIXME to be implemented +class ConsistencyL2(nn.Module): + def __init__(self): + super(ConsistencyL2, self).__init__() + self.mse_fn = nn.MSELoss() + + def forward(self, feat): + feat_0 = feat[:int(feat.size(0)/2),:] + feat_1 = feat[int(feat.size(0)/2):,:] + loss = self.mse_fn(feat_0, feat_1) + return loss + +class ConsistencyL1(nn.Module): + def __init__(self): + super(ConsistencyL1, self).__init__() + self.L1_fn = nn.L1Loss() + + def forward(self, feat): + feat_0 = feat[:int(feat.size(0)/2),:] + feat_1 = feat[int(feat.size(0)/2):,:] + loss = self.L1_fn(feat_0, feat_1) + return loss \ No newline at end of file diff --git a/training/loss/contrastive_regularization.py b/training/loss/contrastive_regularization.py new file mode 100644 index 0000000000000000000000000000000000000000..8e5bb7c3fee3dc66f6c2028ea0a1dbffbe25476c --- /dev/null +++ b/training/loss/contrastive_regularization.py @@ -0,0 +1,78 @@ +import random +from collections import defaultdict +import torch +import torch.nn 
as nn +import torch.nn.functional as F +from .abstract_loss_func import AbstractLossClass +from metrics.registry import LOSSFUNC + + +def swap_spe_features(type_list, value_list): + type_list = type_list.cpu().numpy().tolist() + # get index + index_list = list(range(len(type_list))) + + # init a dict, where its key is the type and value is the index + spe_dict = defaultdict(list) + + # do for-loop to get spe dict + for i, one_type in enumerate(type_list): + spe_dict[one_type].append(index_list[i]) + + # shuffle the value list of each key + for keys in spe_dict.keys(): + random.shuffle(spe_dict[keys]) + + # generate a new index list for the value list + new_index_list = [] + for one_type in type_list: + value = spe_dict[one_type].pop() + new_index_list.append(value) + + # swap the value_list by new_index_list + value_list_new = value_list[new_index_list] + + return value_list_new + + +@LOSSFUNC.register_module(module_name="contrastive_regularization") +class ContrastiveLoss(AbstractLossClass): + def __init__(self, margin=1.0): + super().__init__() + self.margin = margin + + def contrastive_loss(self, anchor, positive, negative): + dist_pos = F.pairwise_distance(anchor, positive) + dist_neg = F.pairwise_distance(anchor, negative) + # Compute loss as the distance between anchor and negative minus the distance between anchor and positive + loss = torch.mean(torch.clamp(dist_pos - dist_neg + self.margin, min=0.0)) + return loss + + def forward(self, common, specific, spe_label): + # prepare + bs = common.shape[0] + real_common, fake_common = common.chunk(2) + ### common real + idx_list = list(range(0, bs//2)) + random.shuffle(idx_list) + real_common_anchor = common[idx_list] + ### common fake + idx_list = list(range(bs//2, bs)) + random.shuffle(idx_list) + fake_common_anchor = common[idx_list] + ### specific + specific_anchor = swap_spe_features(spe_label, specific) + real_specific_anchor, fake_specific_anchor = specific_anchor.chunk(2) + real_specific, fake_specific = specific.chunk(2) + + # Compute the contrastive loss of common between real and fake + loss_realcommon = self.contrastive_loss(real_common, real_common_anchor, fake_common_anchor) + loss_fakecommon = self.contrastive_loss(fake_common, fake_common_anchor, real_common_anchor) + + # Comupte the constrastive loss of specific between real and fake + loss_realspecific = self.contrastive_loss(real_specific, real_specific_anchor, fake_specific_anchor) + loss_fakespecific = self.contrastive_loss(fake_specific, fake_specific_anchor, real_specific_anchor) + + # Compute the final loss as the sum of all contrastive losses + loss = loss_realcommon + loss_fakecommon + loss_fakespecific + loss_realspecific + return loss \ No newline at end of file diff --git a/training/loss/cross_entropy_loss.py b/training/loss/cross_entropy_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..efa7123ed0ee0516743fa41d43b53e063c21a460 --- /dev/null +++ b/training/loss/cross_entropy_loss.py @@ -0,0 +1,26 @@ +import torch.nn as nn +from .abstract_loss_func import AbstractLossClass +from metrics.registry import LOSSFUNC + + +@LOSSFUNC.register_module(module_name="cross_entropy") +class CrossEntropyLoss(AbstractLossClass): + def __init__(self): + super().__init__() + self.loss_fn = nn.CrossEntropyLoss() + + def forward(self, inputs, targets): + """ + Computes the cross-entropy loss. + + Args: + inputs: A PyTorch tensor of size (batch_size, num_classes) containing the predicted scores. 
+ targets: A PyTorch tensor of size (batch_size) containing the ground-truth class indices. + + Returns: + A scalar tensor representing the cross-entropy loss. + """ + # Compute the cross-entropy loss + loss = self.loss_fn(inputs, targets) + + return loss \ No newline at end of file diff --git a/training/loss/id_loss.py b/training/loss/id_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..43cc0a12ed3c7b5aad53bb7caec82c01d4fa29aa --- /dev/null +++ b/training/loss/id_loss.py @@ -0,0 +1,16 @@ +import torch +import torch.nn as nn +from .abstract_loss_func import AbstractLossClass +from metrics.registry import LOSSFUNC + +@LOSSFUNC.register_module(module_name="id_loss") +class IDLoss(AbstractLossClass): + def __init__(self, margin=0.5): + super().__init__() + self.cosine_similarity = nn.CosineSimilarity(dim=1, eps=1e-6) + self.margin = margin + + def forward(self, x1, x2): + cosine_similarity = self.cosine_similarity(x1, x2) + theta = torch.acos(cosine_similarity) + return 1 - torch.cos(theta + self.margin) \ No newline at end of file diff --git a/training/loss/js_loss.py b/training/loss/js_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..ccdce81ef8eb4e8362f13e305ccb2255fae42bc4 --- /dev/null +++ b/training/loss/js_loss.py @@ -0,0 +1,32 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from .abstract_loss_func import AbstractLossClass +from metrics.registry import LOSSFUNC + + +@LOSSFUNC.register_module(module_name="jsloss") +class JS_Loss(AbstractLossClass): + def __init__(self): + super().__init__() + + def forward(self, inputs, targets): + """ + Computes the Jensen-Shannon divergence loss. + """ + # Compute the probability distributions + inputs_prob = F.softmax(inputs, dim=1) + targets_prob = F.softmax(targets, dim=1) + + # Compute the average probability distribution + avg_prob = (inputs_prob + targets_prob) / 2 + + # Compute the KL divergence component for each distribution + kl_div_loss = nn.KLDivLoss(reduction='batchmean') + kl_inputs = kl_div_loss(inputs_prob.log(), avg_prob) + kl_targets = kl_div_loss(targets_prob.log(), avg_prob) + + # Compute the Jensen-Shannon divergence + loss = 0.5 * (kl_inputs + kl_targets) + + return loss \ No newline at end of file diff --git a/training/loss/l1_loss.py b/training/loss/l1_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..f2bfdedb628c802ef8ef8ebe977ad5c9a3ce1a37 --- /dev/null +++ b/training/loss/l1_loss.py @@ -0,0 +1,19 @@ +import torch.nn as nn +from .abstract_loss_func import AbstractLossClass +from metrics.registry import LOSSFUNC + + +@LOSSFUNC.register_module(module_name="l1loss") +class L1Loss(AbstractLossClass): + def __init__(self): + super().__init__() + self.loss_fn = nn.L1Loss() + + def forward(self, inputs, targets): + """ + Computes the l1 loss. + """ + # Compute the l1 loss + loss = self.loss_fn(inputs, targets) + + return loss \ No newline at end of file diff --git a/training/loss/patch_consistency_loss.py b/training/loss/patch_consistency_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..5b18016e5de81ddfc034b003db5e60b29470739b --- /dev/null +++ b/training/loss/patch_consistency_loss.py @@ -0,0 +1,76 @@ +import torch +from metrics.registry import LOSSFUNC +from .abstract_loss_func import AbstractLossClass + + +def mahalanobis_distance(values: torch.Tensor, mean: torch.Tensor, inv_covariance: torch.Tensor) -> torch.Tensor: + """Compute the batched mahalanobis distance. 
+ + values is a batch of feature vectors. + mean is either the mean of the distribution to compare, or a second + batch of feature vectors. + inv_covariance is the inverse covariance of the target distribution. + """ + assert values.dim() == 2 + assert 1 <= mean.dim() <= 2 + assert inv_covariance.dim() == 2 + assert values.shape[1] == mean.shape[-1] + assert mean.shape[-1] == inv_covariance.shape[0] + assert inv_covariance.shape[0] == inv_covariance.shape[1] + + if mean.dim() == 1: # Distribution mean. + mean = mean.unsqueeze(0) + x_mu = values - mean # batch x features + # Same as dist = x_mu.t() * inv_covariance * x_mu batch wise + dist = torch.einsum("im,mn,in->i", x_mu, inv_covariance, x_mu) + + return dist.sqrt() + + +@LOSSFUNC.register_module(module_name="patch_consistency_loss") +class PatchConsistencyLoss(AbstractLossClass): + def __init__(self, c_real, c_fake, c_cross): + super().__init__() + self.c_real = c_real + self.c_fake = c_fake + self.c_cross = c_cross + + def forward(self, attention_map_real, attention_map_fake, feature_patch, real_feature_mean, real_inv_covariance, + fake_feature_mean, fake_inv_covariance, labels): + # calculate mahalanobis distance + B, H, W, C = feature_patch.size() + dist_real = mahalanobis_distance(feature_patch.reshape(B * H * W, C), real_feature_mean.cuda(), + real_inv_covariance.cuda()) + dist_fake = mahalanobis_distance(feature_patch.reshape(B * H * W, C), fake_feature_mean.cuda(), + fake_inv_covariance.cuda()) + fake_indices = torch.where(labels == 1.0)[0] + index_map = torch.relu(dist_real - dist_fake).reshape((B, H, W))[fake_indices, :] + + # loss for real samples + if attention_map_real.shape[0] == 0: + loss_real = 0 + else: + B, PP, PP = attention_map_real.shape + c_matrix = (1 - self.c_real) * torch.eye(PP).cuda() + self.c_real * torch.ones(PP).cuda() + c_matrix = c_matrix.expand(B, -1, -1) + loss_real = torch.sum(torch.abs(attention_map_real - c_matrix)) / (B * (PP * PP - PP)) + + if attention_map_fake.shape[0] == 0: + loss_fake = 0 + else: + B, PP, PP = attention_map_fake.shape + c_matrix = [] + for b in range(B): + fake_indices = torch.where(index_map[b].reshape(-1) > 0)[0] + real_indices = torch.where(index_map[b].reshape(-1) <= 0)[0] + tmp = torch.zeros((PP, PP)).cuda() + self.c_cross + for i in fake_indices: + tmp[i, fake_indices] = self.c_fake + for i in real_indices: + tmp[i, real_indices] = self.c_real + c_matrix.append(tmp) + + c_matrix = torch.stack(c_matrix).cuda() + loss_fake = torch.sum(torch.abs(attention_map_fake - c_matrix)) / (B * (PP * PP - PP)) + + return loss_real + loss_fake diff --git a/training/loss/region_independent_loss.py b/training/loss/region_independent_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..71905d9aa493b9e7829eda287aed4a7e0bf52d5d --- /dev/null +++ b/training/loss/region_independent_loss.py @@ -0,0 +1,56 @@ +import torch +import torch.nn.functional as F +from detectors.multi_attention_detector import AttentionPooling +from .abstract_loss_func import AbstractLossClass +from metrics.registry import LOSSFUNC + + +@LOSSFUNC.register_module(module_name="region_independent_loss") +class RegionIndependentLoss(AbstractLossClass): + def __init__(self, M, N, alpha, alpha_decay, decay_batch, inter_margin, intra_margin): + super().__init__() + feature_centers = torch.zeros(M, N) + self.register_buffer("feature_centers", + feature_centers.cuda() if torch.cuda.is_available() else feature_centers) + self.alpha = alpha + self.alpha_decay = alpha_decay + self.decay_batch = decay_batch + 
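+        # batch_cnt (below) stands in for an epoch counter: forward() increments it
+        # and multiplies self.alpha by alpha_decay every decay_batch batches.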
self.batch_cnt = 0 + self.inter_margin = inter_margin + intra_margin = torch.Tensor(intra_margin) + self.register_buffer("intra_margin", intra_margin.cuda() if torch.cuda.is_available() else intra_margin) + self.atp = AttentionPooling() + + def forward(self, feature_maps_d, attention_maps, labels): + B, N, H, W = feature_maps_d.size() + B, M, AH, AW = attention_maps.size() + if AH != H or AW != W: + attention_maps = F.interpolate(attention_maps, (H, W), mode='bilinear', align_corners=True) + feature_matrix = self.atp(feature_maps_d, attention_maps) + + # Calculate new feature centers. P.s., I don't know why to use no_grad() and detach() for so many times. + feature_centers = self.feature_centers.detach() + new_feature_centers = feature_centers + self.alpha * torch.mean(feature_matrix - feature_centers, dim=0) + new_feature_centers = new_feature_centers.detach() + with torch.no_grad(): + self.feature_centers = new_feature_centers + + # Calculate intra-class loss + intra_margins = torch.gather(self.intra_margin.repeat(B, 1), dim=1, index=labels.unsqueeze(1)) + intra_class_loss = torch.mean(F.relu(torch.norm(feature_matrix - new_feature_centers, dim=-1) - intra_margins)) + + # Calculate inter-class loss + inter_class_loss = 0 + for i in range(M): + for j in range(i + 1, M): + inter_class_loss += F.relu( + self.inter_margin - torch.dist(new_feature_centers[i], new_feature_centers[j]), inplace=False) + inter_class_loss = inter_class_loss / M / self.alpha + + # Count batch, this is used to simulate epoch, since alpha cannot be modified based on epoch due to code + # structure. self.alpha should be modified every N batch. + self.batch_cnt += 1 + if self.batch_cnt % self.decay_batch == 0: + self.alpha *= self.alpha_decay + + return inter_class_loss + intra_class_loss diff --git a/training/loss/supercontrast_loss.py b/training/loss/supercontrast_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..7b89cd67994873894787bb6e12f0ab6f583b4bf4 --- /dev/null +++ b/training/loss/supercontrast_loss.py @@ -0,0 +1,109 @@ +""" +Author: Yonglong Tian (yonglong@mit.edu) +Date: May 07, 2020 +""" +from __future__ import print_function + +import torch +import torch.nn as nn +from .abstract_loss_func import AbstractLossClass +from metrics.registry import LOSSFUNC + + +@LOSSFUNC.register_module(module_name="supcon") +class SupConLoss(AbstractLossClass): + """Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf. + It also supports the unsupervised contrastive loss in SimCLR""" + def __init__(self, temperature=0.07, contrast_mode='all', + base_temperature=0.07): + super().__init__() + self.temperature = temperature + self.contrast_mode = contrast_mode + self.base_temperature = base_temperature + + def forward(self, features, labels=None, mask=None): + """Compute loss for model. If both `labels` and `mask` are None, + it degenerates to SimCLR unsupervised loss: + https://arxiv.org/pdf/2002.05709.pdf + + Args: + features: hidden vector of shape [bsz, n_views, ...]. + labels: ground truth of shape [bsz]. + mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j + has the same class as sample i. Can be asymmetric. + Returns: + A loss scalar. 
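+        Example (shapes only): ``features`` of shape ``[bsz, 2, d]`` from two
+        augmented views together with ``labels`` of shape ``[bsz]`` yields a
+        single scalar loss.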
+ """ + device = (torch.device('cuda') + if features.is_cuda + else torch.device('cpu')) + + if len(features.shape) < 3: + raise ValueError('`features` needs to be [bsz, n_views, ...],' + 'at least 3 dimensions are required') + if len(features.shape) > 3: + features = features.view(features.shape[0], features.shape[1], -1) + + batch_size = features.shape[0] + if labels is not None and mask is not None: + raise ValueError('Cannot define both `labels` and `mask`') + elif labels is None and mask is None: + mask = torch.eye(batch_size, dtype=torch.float32).to(device) + elif labels is not None: + labels = labels.contiguous().view(-1, 1) + if labels.shape[0] != batch_size: + raise ValueError('Num of labels does not match num of features') + mask = torch.eq(labels, labels.T).to(device) + else: + mask = mask.to(device) + + contrast_count = features.shape[1] + contrast_feature = torch.cat(torch.unbind(features, dim=1), dim=0) + if self.contrast_mode == 'one': + anchor_feature = features[:, 0] + anchor_count = 1 + elif self.contrast_mode == 'all': + anchor_feature = contrast_feature + anchor_count = contrast_count + else: + raise ValueError('Unknown mode: {}'.format(self.contrast_mode)) + + # compute logits + anchor_dot_contrast = torch.div( + torch.matmul(anchor_feature, contrast_feature.T), + self.temperature) + # for numerical stability + logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True) + logits = anchor_dot_contrast - logits_max.detach() + + # tile mask + mask = mask.repeat(anchor_count, contrast_count) + # mask-out self-contrast cases + logits_mask = torch.scatter( + torch.ones_like(mask), + 1, + torch.arange(batch_size * anchor_count).view(-1, 1).to(device), + 0 + ) + mask = mask * logits_mask + + # compute log_prob + exp_logits = torch.exp(logits) * logits_mask + log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True)) + + # compute mean of log-likelihood over positive + # modified to handle edge cases when there is no positive pair + # for an anchor point. + # Edge case e.g.:- + # features of shape: [4,1,...] 
+ # labels: [0,1,1,2] + # loss before mean: [nan, ..., ..., nan] + mask_pos_pairs = mask.sum(1) + mask_pos_pairs = torch.where(mask_pos_pairs < 1e-6, 1, mask_pos_pairs) + mean_log_prob_pos = (mask * log_prob).sum(1) / mask_pos_pairs + + # loss + loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos + loss = loss.view(anchor_count, batch_size).mean() + + return loss \ No newline at end of file diff --git a/training/loss/vgg_loss.py b/training/loss/vgg_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..8ce09b72e11bb9474fe7e466547a1486045925b8 --- /dev/null +++ b/training/loss/vgg_loss.py @@ -0,0 +1,152 @@ +"""A VGG-based perceptual loss function for PyTorch.""" + +import torch +from torch import nn +from torch.nn import functional as F +from torchvision import models, transforms +import torch +import torch.nn as nn +from .abstract_loss_func import AbstractLossClass +from metrics.registry import LOSSFUNC + + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + + +class Lambda(nn.Module): + """Wraps a callable in an :class:`nn.Module` without registering it.""" + + def __init__(self, func): + super().__init__() + object.__setattr__(self, 'forward', func) + + def extra_repr(self): + return getattr(self.forward, '__name__', type(self.forward).__name__) + '()' + + +class WeightedLoss(nn.ModuleList): + """A weighted combination of multiple loss functions.""" + + def __init__(self, losses, weights, verbose=False): + super().__init__() + for loss in losses: + self.append(loss if isinstance(loss, nn.Module) else Lambda(loss)) + self.weights = weights + self.verbose = verbose + + def _print_losses(self, losses): + for i, loss in enumerate(losses): + print(f'({i}) {type(self[i]).__name__}: {loss.item()}') + + def forward(self, *args, **kwargs): + losses = [] + for loss, weight in zip(self, self.weights): + losses.append(loss(*args, **kwargs) * weight) + if self.verbose: + self._print_losses(losses) + return sum(losses) + + +class TVLoss(nn.Module): + """Total variation loss (Lp penalty on image gradient magnitude). + The input must be 4D. If a target (second parameter) is passed in, it is + ignored. + ``p=1`` yields the vectorial total variation norm. It is a generalization + of the originally proposed (isotropic) 2D total variation norm (see + (see https://en.wikipedia.org/wiki/Total_variation_denoising) for color + images. On images with a single channel it is equal to the 2D TV norm. + ``p=2`` yields a variant that is often used for smoothing out noise in + reconstructions of images from neural network feature maps (see Mahendran + and Vevaldi, "Understanding Deep Image Representations by Inverting + Them", https://arxiv.org/abs/1412.0035) + :attr:`reduction` can be set to ``'mean'``, ``'sum'``, or ``'none'`` + similarly to the loss functions in :mod:`torch.nn`. The default is + ``'mean'``. 
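+    Example (illustrative): ``TVLoss(p=2)(torch.rand(1, 3, 64, 64))`` returns a
+    scalar penalty on the squared image gradients; with ``reduction='none'`` the
+    unreduced per-pixel map is returned instead.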
+ """ + + def __init__(self, p, reduction='mean', eps=1e-8): + super().__init__() + if p not in {1, 2}: + raise ValueError('p must be 1 or 2') + if reduction not in {'mean', 'sum', 'none'}: + raise ValueError("reduction must be 'mean', 'sum', or 'none'") + self.p = p + self.reduction = reduction + self.eps = eps + + def forward(self, input, target=None): + input = F.pad(input, (0, 1, 0, 1), 'replicate') + x_diff = input[..., :-1, :-1] - input[..., :-1, 1:] + y_diff = input[..., :-1, :-1] - input[..., 1:, :-1] + diff = x_diff**2 + y_diff**2 + if self.p == 1: + diff = (diff + self.eps).mean(dim=1, keepdims=True).sqrt() + if self.reduction == 'mean': + return diff.mean() + if self.reduction == 'sum': + return diff.sum() + return diff + + +@LOSSFUNC.register_module(module_name="vgg_loss") +class VGGLoss(AbstractLossClass): + """Computes the VGG perceptual loss between two batches of images. + The input and target must be 4D tensors with three channels + ``(B, 3, H, W)`` and must have equivalent shapes. Pixel values should be + normalized to the range 0–1. + The VGG perceptual loss is the mean squared difference between the features + computed for the input and target at layer :attr:`layer` (default 8, or + ``relu2_2``) of the pretrained model specified by :attr:`model` (either + ``'vgg16'`` (default) or ``'vgg19'``). + If :attr:`shift` is nonzero, a random shift of at most :attr:`shift` + pixels in both height and width will be applied to all images in the input + and target. The shift will only be applied when the loss function is in + training mode, and will not be applied if a precomputed feature map is + supplied as the target. + :attr:`reduction` can be set to ``'mean'``, ``'sum'``, or ``'none'`` + similarly to the loss functions in :mod:`torch.nn`. The default is + ``'mean'``. + :meth:`get_features()` may be used to precompute the features for the + target, to speed up the case where inputs are compared against the same + target over and over. To use the precomputed features, pass them in as + :attr:`target` and set :attr:`target_is_features` to :code:`True`. + Instances of :class:`VGGLoss` must be manually converted to the same + device and dtype as their inputs. 
+ """ + + models = {'vgg16': models.vgg16, 'vgg19': models.vgg19} + + def __init__(self, model='vgg16', layer=8, shift=0, reduction='mean'): + super().__init__() + self.instancenorm = nn.InstanceNorm2d(512, affine=False) + self.shift = shift + self.reduction = reduction + self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + self.model = self.models[model](pretrained=True).features[:layer+1] + self.model.eval() + self.model.requires_grad_(False) + self.model.to(device) + + def get_features(self, input): + return self.model(self.normalize(input)) + + def train(self, mode=True): + self.training = mode + + def forward(self, input, target, target_is_features=False): + if target_is_features: + input_feats = self.get_features(input) + target_feats = target + else: + sep = input.shape[0] + batch = torch.cat([input, target]) + if self.shift and self.training: + padded = F.pad(batch, [self.shift] * 4, mode='replicate') + batch = transforms.RandomCrop(batch.shape[2:])(padded) + feats = self.get_features(batch) + input_feats, target_feats = feats[:sep], feats[sep:] + # input_feats, target_feats = \ + # self.instancenorm(input_feats), \ + # self.instancenorm(target_feats) + return F.mse_loss(input_feats, target_feats, reduction=self.reduction) \ No newline at end of file diff --git a/training/metrics/__init__.py b/training/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..676145d777810e4a51bdaf59fdec4f5358aae349 --- /dev/null +++ b/training/metrics/__init__.py @@ -0,0 +1,7 @@ +import os +import sys +current_file_path = os.path.abspath(__file__) +parent_dir = os.path.dirname(os.path.dirname(current_file_path)) +project_root_dir = os.path.dirname(parent_dir) +sys.path.append(parent_dir) +sys.path.append(project_root_dir) diff --git a/training/metrics/base_metrics_class.py b/training/metrics/base_metrics_class.py new file mode 100644 index 0000000000000000000000000000000000000000..4e3f33b3707e0adc53e5c68848bc4c51132e53ca --- /dev/null +++ b/training/metrics/base_metrics_class.py @@ -0,0 +1,205 @@ +import numpy as np +from sklearn import metrics +from collections import defaultdict +import torch +import torch.nn as nn + + +def get_accracy(output, label): + _, prediction = torch.max(output, 1) # argmax + correct = (prediction == label).sum().item() + accuracy = correct / prediction.size(0) + return accuracy + + +def get_prediction(output, label): + prob = nn.functional.softmax(output, dim=1)[:, 1] + prob = prob.view(prob.size(0), 1) + label = label.view(label.size(0), 1) + #print(prob.size(), label.size()) + datas = torch.cat((prob, label.float()), dim=1) + return datas + + +def calculate_metrics_for_train(label, output): + if output.size(1) == 2: + prob = torch.softmax(output, dim=1)[:, 1] + else: + prob = output + + # Accuracy + _, prediction = torch.max(output, 1) + correct = (prediction == label).sum().item() + accuracy = correct / prediction.size(0) + + # Average Precision + y_true = label.cpu().detach().numpy() + y_pred = prob.cpu().detach().numpy() + ap = metrics.average_precision_score(y_true, y_pred) + + # AUC and EER + try: + fpr, tpr, thresholds = metrics.roc_curve(label.squeeze().cpu().numpy(), + prob.squeeze().cpu().numpy(), + pos_label=1) + except: + # for the case when we only have one sample + return None, None, accuracy, ap + + if np.isnan(fpr[0]) or np.isnan(tpr[0]): + # for the case when all the samples within a batch is fake/real + auc, eer = None, None + else: + auc = metrics.auc(fpr, tpr) + fnr = 1 - tpr 
+ eer = fpr[np.nanargmin(np.absolute((fnr - fpr)))] + + return auc, eer, accuracy, ap + + +# ------------ compute average metrics of batches--------------------- +class Metrics_batch(): + def __init__(self): + self.tprs = [] + self.mean_fpr = np.linspace(0, 1, 100) + self.aucs = [] + self.eers = [] + self.aps = [] + + self.correct = 0 + self.total = 0 + self.losses = [] + + def update(self, label, output): + acc = self._update_acc(label, output) + if output.size(1) == 2: + prob = torch.softmax(output, dim=1)[:, 1] + else: + prob = output + #label = 1-label + #prob = torch.softmax(output, dim=1)[:, 1] + auc, eer = self._update_auc(label, prob) + ap = self._update_ap(label, prob) + + return acc, auc, eer, ap + + def _update_auc(self, lab, prob): + fpr, tpr, thresholds = metrics.roc_curve(lab.squeeze().cpu().numpy(), + prob.squeeze().cpu().numpy(), + pos_label=1) + if np.isnan(fpr[0]) or np.isnan(tpr[0]): + return -1, -1 + + auc = metrics.auc(fpr, tpr) + interp_tpr = np.interp(self.mean_fpr, fpr, tpr) + interp_tpr[0] = 0.0 + self.tprs.append(interp_tpr) + self.aucs.append(auc) + + # return auc + + # EER + fnr = 1 - tpr + eer = fpr[np.nanargmin(np.absolute((fnr - fpr)))] + self.eers.append(eer) + + return auc, eer + + def _update_acc(self, lab, output): + _, prediction = torch.max(output, 1) # argmax + correct = (prediction == lab).sum().item() + accuracy = correct / prediction.size(0) + # self.accs.append(accuracy) + self.correct = self.correct+correct + self.total = self.total+lab.size(0) + return accuracy + + def _update_ap(self, label, prob): + y_true = label.cpu().detach().numpy() + y_pred = prob.cpu().detach().numpy() + ap = metrics.average_precision_score(y_true,y_pred) + self.aps.append(ap) + + return np.mean(ap) + + def get_mean_metrics(self): + mean_acc, std_acc = self.correct/self.total, 0 + mean_auc, std_auc = self._mean_auc() + mean_err, std_err = np.mean(self.eers), np.std(self.eers) + mean_ap, std_ap = np.mean(self.aps), np.std(self.aps) + + return {'acc':mean_acc, 'auc':mean_auc, 'eer':mean_err, 'ap':mean_ap} + + def _mean_auc(self): + mean_tpr = np.mean(self.tprs, axis=0) + mean_tpr[-1] = 1.0 + mean_auc = metrics.auc(self.mean_fpr, mean_tpr) + std_auc = np.std(self.aucs) + return mean_auc, std_auc + + def clear(self): + self.tprs.clear() + self.aucs.clear() + # self.accs.clear() + self.correct=0 + self.total=0 + self.eers.clear() + self.aps.clear() + self.losses.clear() + + +# ------------ compute average metrics of all data --------------------- +class Metrics_all(): + def __init__(self): + self.probs = [] + self.labels = [] + self.correct = 0 + self.total = 0 + + def store(self, label, output): + prob = torch.softmax(output, dim=1)[:, 1] + _, prediction = torch.max(output, 1) # argmax + correct = (prediction == label).sum().item() + self.correct += correct + self.total += label.size(0) + self.labels.append(label.squeeze().cpu().numpy()) + self.probs.append(prob.squeeze().cpu().numpy()) + + def get_metrics(self): + y_pred = np.concatenate(self.probs) + y_true = np.concatenate(self.labels) + # auc + fpr, tpr, thresholds = metrics.roc_curve(y_true,y_pred,pos_label=1) + auc = metrics.auc(fpr, tpr) + # eer + fnr = 1 - tpr + eer = fpr[np.nanargmin(np.absolute((fnr - fpr)))] + # ap + ap = metrics.average_precision_score(y_true,y_pred) + # acc + acc = self.correct / self.total + return {'acc':acc, 'auc':auc, 'eer':eer, 'ap':ap} + + def clear(self): + self.probs.clear() + self.labels.clear() + self.correct = 0 + self.total = 0 + + +# only used to record a series of scalar value 
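+# (e.g. a running loss): update() accumulates item * num, average() returns the
+# weighted mean (or None if nothing has been recorded), and clear() resets it.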
+class Recorder: + def __init__(self): + self.sum = 0 + self.num = 0 + def update(self, item, num=1): + if item is not None: + self.sum += item * num + self.num += num + def average(self): + if self.num == 0: + return None + return self.sum/self.num + def clear(self): + self.sum = 0 + self.num = 0 diff --git a/training/metrics/registry.py b/training/metrics/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..86e256c18d0ad522de79149a154f676fd0bdb414 --- /dev/null +++ b/training/metrics/registry.py @@ -0,0 +1,20 @@ +class Registry(object): + def __init__(self): + self.data = {} + + def register_module(self, module_name=None): + def _register(cls): + name = module_name + if module_name is None: + name = cls.__name__ + self.data[name] = cls + return cls + return _register + + def __getitem__(self, key): + return self.data[key] + +BACKBONE = Registry() +DETECTOR = Registry() +TRAINER = Registry() +LOSSFUNC = Registry() diff --git a/training/metrics/utils.py b/training/metrics/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..606d35cb96afb8beff54637581232bb396712f96 --- /dev/null +++ b/training/metrics/utils.py @@ -0,0 +1,93 @@ +from sklearn import metrics +import numpy as np + + +def parse_metric_for_print(metric_dict): + if metric_dict is None: + return "\n" + str = "\n" + str += "================================ Each dataset best metric ================================ \n" + for key, value in metric_dict.items(): + if key != 'avg': + str= str+ f"| {key}: " + for k,v in value.items(): + str = str + f" {k}={v} " + str= str+ "| \n" + else: + str += "============================================================================================= \n" + str += "================================== Average best metric ====================================== \n" + avg_dict = value + for avg_key, avg_value in avg_dict.items(): + if avg_key == 'dataset_dict': + for key,value in avg_value.items(): + str = str + f"| {key}: {value} | \n" + else: + str = str + f"| avg {avg_key}: {avg_value} | \n" + str += "=============================================================================================" + return str + + +def get_test_metrics(y_pred, y_true, img_names): + def get_video_metrics(image, pred, label): + result_dict = {} + new_label = [] + new_pred = [] + # print(image[0]) + # print(pred.shape) + # print(label.shape) + for item in np.transpose(np.stack((image, pred, label)), (1, 0)): + + s = item[0] + if '\\' in s: + parts = s.split('\\') + else: + parts = s.split('/') + a = parts[-2] + b = parts[-1] + + if a not in result_dict: + result_dict[a] = [] + + result_dict[a].append(item) + image_arr = list(result_dict.values()) + + for video in image_arr: + pred_sum = 0 + label_sum = 0 + leng = 0 + for frame in video: + pred_sum += float(frame[1]) + label_sum += int(frame[2]) + leng += 1 + new_pred.append(pred_sum / leng) + new_label.append(int(label_sum / leng)) + fpr, tpr, thresholds = metrics.roc_curve(new_label, new_pred) + v_auc = metrics.auc(fpr, tpr) + fnr = 1 - tpr + v_eer = fpr[np.nanargmin(np.absolute((fnr - fpr)))] + return v_auc, v_eer + + + y_pred = y_pred.squeeze() + # For UCF, where labels for different manipulations are not consistent. 
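+    # Collapse manipulation-specific labels into the binary setting:
+    # 0 stays real, any label >= 1 is treated as fake.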
+ y_true[y_true >= 1] = 1 + # auc + fpr, tpr, thresholds = metrics.roc_curve(y_true, y_pred, pos_label=1) + auc = metrics.auc(fpr, tpr) + # eer + fnr = 1 - tpr + eer = fpr[np.nanargmin(np.absolute((fnr - fpr)))] + # ap + ap = metrics.average_precision_score(y_true, y_pred) + # acc + prediction_class = (y_pred > 0.5).astype(int) + correct = (prediction_class == np.clip(y_true, a_min=0, a_max=1)).sum().item() + acc = correct / len(prediction_class) + if type(img_names[0]) is not list: + # calculate video-level auc for the frame-level methods. + v_auc, _ = get_video_metrics(img_names, y_pred, y_true) + else: + # video-level methods + v_auc=auc + + return {'acc': acc, 'auc': auc, 'eer': eer, 'ap': ap, 'pred': y_pred, 'video_auc': v_auc, 'label': y_true} diff --git a/training/networks/__init__.py b/training/networks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a649c89fd79b33b4898ed7f1ae53af087bfe6d32 --- /dev/null +++ b/training/networks/__init__.py @@ -0,0 +1,15 @@ +import os +import sys +current_file_path = os.path.abspath(__file__) +parent_dir = os.path.dirname(os.path.dirname(current_file_path)) +project_root_dir = os.path.dirname(parent_dir) +sys.path.append(parent_dir) +sys.path.append(project_root_dir) + +from metrics.registry import BACKBONE + +from .xception import Xception +from .mesonet import Meso4, MesoInception4 +from .resnet34 import ResNet34 +from .efficientnetb4 import EfficientNetB4 +from .xception_sladd import Xception_SLADD diff --git a/training/networks/adaface.py b/training/networks/adaface.py new file mode 100644 index 0000000000000000000000000000000000000000..21730fdac4bed733ba2cde876884f5b46d8ea230 --- /dev/null +++ b/training/networks/adaface.py @@ -0,0 +1,414 @@ +from collections import namedtuple +import torch +import torch.nn as nn +from torch.nn import Dropout +from torch.nn import MaxPool2d +from torch.nn import Sequential +from torch.nn import Conv2d, Linear +from torch.nn import BatchNorm1d, BatchNorm2d +from torch.nn import ReLU, Sigmoid +from torch.nn import Module +from torch.nn import PReLU +import os + +def build_model(model_name='ir_50'): + if model_name == 'ir_101': + return IR_101(input_size=(112,112)) + elif model_name == 'ir_50': + return IR_50(input_size=(112,112)) + elif model_name == 'ir_se_50': + return IR_SE_50(input_size=(112,112)) + elif model_name == 'ir_34': + return IR_34(input_size=(112,112)) + elif model_name == 'ir_18': + return IR_18(input_size=(112,112)) + else: + raise ValueError('not a correct model name', model_name) + +def initialize_weights(modules): + """ Weight initilize, conv2d and linear is initialized with kaiming_normal + """ + for m in modules: + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, + mode='fan_out', + nonlinearity='relu') + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + nn.init.kaiming_normal_(m.weight, + mode='fan_out', + nonlinearity='relu') + if m.bias is not None: + m.bias.data.zero_() + + +class Flatten(Module): + """ Flat tensor + """ + def forward(self, input): + return input.view(input.size(0), -1) + + +class LinearBlock(Module): + """ Convolution block without no-linear activation layer + """ + def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1): + super(LinearBlock, self).__init__() + self.conv = Conv2d(in_c, out_c, kernel, stride, padding, groups=groups, bias=False) + self.bn = BatchNorm2d(out_c) + + 
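+    # Conv2d followed by BatchNorm2d with no activation; the GDC head below
+    # uses this block for its depthwise 7x7 convolution.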
def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return x + + +class GNAP(Module): + """ Global Norm-Aware Pooling block + """ + def __init__(self, in_c): + super(GNAP, self).__init__() + self.bn1 = BatchNorm2d(in_c, affine=False) + self.pool = nn.AdaptiveAvgPool2d((1, 1)) + self.bn2 = BatchNorm1d(in_c, affine=False) + + def forward(self, x): + x = self.bn1(x) + x_norm = torch.norm(x, 2, 1, True) + x_norm_mean = torch.mean(x_norm) + weight = x_norm_mean / x_norm + x = x * weight + x = self.pool(x) + x = x.view(x.shape[0], -1) + feature = self.bn2(x) + return feature + + +class GDC(Module): + """ Global Depthwise Convolution block + """ + def __init__(self, in_c, embedding_size): + super(GDC, self).__init__() + self.conv_6_dw = LinearBlock(in_c, in_c, + groups=in_c, + kernel=(7, 7), + stride=(1, 1), + padding=(0, 0)) + self.conv_6_flatten = Flatten() + self.linear = Linear(in_c, embedding_size, bias=False) + self.bn = BatchNorm1d(embedding_size, affine=False) + + def forward(self, x): + x = self.conv_6_dw(x) + x = self.conv_6_flatten(x) + x = self.linear(x) + x = self.bn(x) + return x + + +class SEModule(Module): + """ SE block + """ + def __init__(self, channels, reduction): + super(SEModule, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.fc1 = Conv2d(channels, channels // reduction, + kernel_size=1, padding=0, bias=False) + + nn.init.xavier_uniform_(self.fc1.weight.data) + + self.relu = ReLU(inplace=True) + self.fc2 = Conv2d(channels // reduction, channels, + kernel_size=1, padding=0, bias=False) + + self.sigmoid = Sigmoid() + + def forward(self, x): + module_input = x + x = self.avg_pool(x) + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.sigmoid(x) + + return module_input * x + + + +class BasicBlockIR(Module): + """ BasicBlock for IRNet + """ + def __init__(self, in_channel, depth, stride): + super(BasicBlockIR, self).__init__() + if in_channel == depth: + self.shortcut_layer = MaxPool2d(1, stride) + else: + self.shortcut_layer = Sequential( + Conv2d(in_channel, depth, (1, 1), stride, bias=False), + BatchNorm2d(depth)) + self.res_layer = Sequential( + BatchNorm2d(in_channel), + Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), + BatchNorm2d(depth), + PReLU(depth), + Conv2d(depth, depth, (3, 3), stride, 1, bias=False), + BatchNorm2d(depth)) + + def forward(self, x): + shortcut = self.shortcut_layer(x) + res = self.res_layer(x) + + return res + shortcut + + +class BottleneckIR(Module): + """ BasicBlock with bottleneck for IRNet + """ + def __init__(self, in_channel, depth, stride): + super(BottleneckIR, self).__init__() + reduction_channel = depth // 4 + if in_channel == depth: + self.shortcut_layer = MaxPool2d(1, stride) + else: + self.shortcut_layer = Sequential( + Conv2d(in_channel, depth, (1, 1), stride, bias=False), + BatchNorm2d(depth)) + self.res_layer = Sequential( + BatchNorm2d(in_channel), + Conv2d(in_channel, reduction_channel, (1, 1), (1, 1), 0, bias=False), + BatchNorm2d(reduction_channel), + PReLU(reduction_channel), + Conv2d(reduction_channel, reduction_channel, (3, 3), (1, 1), 1, bias=False), + BatchNorm2d(reduction_channel), + PReLU(reduction_channel), + Conv2d(reduction_channel, depth, (1, 1), stride, 0, bias=False), + BatchNorm2d(depth)) + + def forward(self, x): + shortcut = self.shortcut_layer(x) + res = self.res_layer(x) + + return res + shortcut + + +class BasicBlockIRSE(BasicBlockIR): + def __init__(self, in_channel, depth, stride): + super(BasicBlockIRSE, self).__init__(in_channel, depth, stride) + 
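+        # IR-SE variant: reuse the parent residual branch and append a
+        # squeeze-and-excitation block (reduction ratio 16) to it.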
self.res_layer.add_module("se_block", SEModule(depth, 16)) + + +class BottleneckIRSE(BottleneckIR): + def __init__(self, in_channel, depth, stride): + super(BottleneckIRSE, self).__init__(in_channel, depth, stride) + self.res_layer.add_module("se_block", SEModule(depth, 16)) + + +class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])): + '''A named tuple describing a ResNet block.''' + + +def get_block(in_channel, depth, num_units, stride=2): + + return [Bottleneck(in_channel, depth, stride)] +\ + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)] + + +def get_blocks(num_layers): + if num_layers == 18: + blocks = [ + get_block(in_channel=64, depth=64, num_units=2), + get_block(in_channel=64, depth=128, num_units=2), + get_block(in_channel=128, depth=256, num_units=2), + get_block(in_channel=256, depth=512, num_units=2) + ] + elif num_layers == 34: + blocks = [ + get_block(in_channel=64, depth=64, num_units=3), + get_block(in_channel=64, depth=128, num_units=4), + get_block(in_channel=128, depth=256, num_units=6), + get_block(in_channel=256, depth=512, num_units=3) + ] + elif num_layers == 50: + blocks = [ + get_block(in_channel=64, depth=64, num_units=3), + get_block(in_channel=64, depth=128, num_units=4), + get_block(in_channel=128, depth=256, num_units=14), + get_block(in_channel=256, depth=512, num_units=3) + ] + elif num_layers == 100: + blocks = [ + get_block(in_channel=64, depth=64, num_units=3), + get_block(in_channel=64, depth=128, num_units=13), + get_block(in_channel=128, depth=256, num_units=30), + get_block(in_channel=256, depth=512, num_units=3) + ] + elif num_layers == 152: + blocks = [ + get_block(in_channel=64, depth=256, num_units=3), + get_block(in_channel=256, depth=512, num_units=8), + get_block(in_channel=512, depth=1024, num_units=36), + get_block(in_channel=1024, depth=2048, num_units=3) + ] + elif num_layers == 200: + blocks = [ + get_block(in_channel=64, depth=256, num_units=3), + get_block(in_channel=256, depth=512, num_units=24), + get_block(in_channel=512, depth=1024, num_units=36), + get_block(in_channel=1024, depth=2048, num_units=3) + ] + + return blocks + + +class Backbone(Module): + def __init__(self, input_size, num_layers, mode='ir'): + """ Args: + input_size: input_size of backbone + num_layers: num_layers of backbone + mode: support ir or irse + """ + super(Backbone, self).__init__() + assert input_size[0] in [112, 224], \ + "input_size should be [112, 112] or [224, 224]" + assert num_layers in [18, 34, 50, 100, 152, 200], \ + "num_layers should be 18, 34, 50, 100 or 152" + assert mode in ['ir', 'ir_se'], \ + "mode should be ir or ir_se" + self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False), + BatchNorm2d(64), PReLU(64)) + blocks = get_blocks(num_layers) + if num_layers <= 100: + if mode == 'ir': + unit_module = BasicBlockIR + elif mode == 'ir_se': + unit_module = BasicBlockIRSE + output_channel = 512 + else: + if mode == 'ir': + unit_module = BottleneckIR + elif mode == 'ir_se': + unit_module = BottleneckIRSE + output_channel = 2048 + + if input_size[0] == 112: + self.output_layer = Sequential(BatchNorm2d(output_channel), + Dropout(0.4), Flatten(), + Linear(output_channel * 7 * 7, 512), + BatchNorm1d(512, affine=False)) + else: + self.output_layer = Sequential( + BatchNorm2d(output_channel), Dropout(0.4), Flatten(), + Linear(output_channel * 14 * 14, 512), + BatchNorm1d(512, affine=False)) + + modules = [] + for block in blocks: + for bottleneck in block: + modules.append( + unit_module(bottleneck.in_channel, 
bottleneck.depth, + bottleneck.stride)) + self.body = Sequential(*modules) + + initialize_weights(self.modules()) + + + def forward(self, x): + + # current code only supports one extra image + # it comes with a extra dimension for number of extra image. We will just squeeze it out for now + x = self.input_layer(x) + + for idx, module in enumerate(self.body): + x = module(x) + + x = self.output_layer(x) + norm = torch.norm(x, 2, 1, True) + output = torch.div(x, norm) + + return output, norm + + + +def IR_18(input_size): + """ Constructs a ir-18 model. + """ + model = Backbone(input_size, 18, 'ir') + + return model + + +def IR_34(input_size): + """ Constructs a ir-34 model. + """ + model = Backbone(input_size, 34, 'ir') + + return model + + +def IR_50(input_size): + """ Constructs a ir-50 model. + """ + model = Backbone(input_size, 50, 'ir') + + return model + + +def IR_101(input_size): + """ Constructs a ir-101 model. + """ + model = Backbone(input_size, 100, 'ir') + + return model + + +def IR_152(input_size): + """ Constructs a ir-152 model. + """ + model = Backbone(input_size, 152, 'ir') + + return model + + +def IR_200(input_size): + """ Constructs a ir-200 model. + """ + model = Backbone(input_size, 200, 'ir') + + return model + + +def IR_SE_50(input_size): + """ Constructs a ir_se-50 model. + """ + model = Backbone(input_size, 50, 'ir_se') + + return model + + +def IR_SE_101(input_size): + """ Constructs a ir_se-101 model. + """ + model = Backbone(input_size, 100, 'ir_se') + + return model + + +def IR_SE_152(input_size): + """ Constructs a ir_se-152 model. + """ + model = Backbone(input_size, 152, 'ir_se') + + return model + + +def IR_SE_200(input_size): + """ Constructs a ir_se-200 model. + """ + model = Backbone(input_size, 200, 'ir_se') + + return model + diff --git a/training/networks/base_backbone.py b/training/networks/base_backbone.py new file mode 100644 index 0000000000000000000000000000000000000000..8cbb14439c4c8da93d12edd0f0af442d8f460698 --- /dev/null +++ b/training/networks/base_backbone.py @@ -0,0 +1,32 @@ +import abc +import torch +from typing import Union + +class AbstractBackbone(abc.ABC): + """ + All backbones for detectors should subclass this class. + """ + def __init__(self, config, load_param: Union[bool, str] = False): + """ + config: (dict) + configurations for the model + load_param: (False | True | Path(str)) + False Do not read; True Read the default path; Path Read the required path + """ + pass + + @abc.abstractmethod + def features(self, data_dict: dict) -> torch.tensor: + """ + """ + + @abc.abstractmethod + def classifier(self, features: torch.tensor) -> torch.tensor: + """ + """ + + def init_weights(self, pretrained_path: Union[bool, str]): + """ + This method can be optionally implemented by subclasses. + """ + pass \ No newline at end of file diff --git a/training/networks/cls_hrnet.py b/training/networks/cls_hrnet.py new file mode 100644 index 0000000000000000000000000000000000000000..2c04a0e05817e7a3d91dd2af895aae7c5dc67403 --- /dev/null +++ b/training/networks/cls_hrnet.py @@ -0,0 +1,569 @@ +''' +# author: Zhiyuan Yan +# email: zhiyuanyan@link.cuhk.edu.cn +# date: 2023-0706 + +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. 
+# Written by Bin Xiao (Bin.Xiao@microsoft.com) +# Modified by Ke Sun (sunk@mail.ustc.edu.cn) +# ------------------------------------------------------------------------------ + +The code is mainly modified from the below link: +https://github.com/HRNet/HRNet-Image-Classification/tree/master +''' + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import logging +import functools + +import numpy as np +from typing import Union + +import torch +import torch.nn as nn +import torch._utils +import torch.nn.functional as F + +BN_MOMENTUM = 0.1 +logger = logging.getLogger(__name__) + + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, + padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) + self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, + bias=False) + self.bn3 = nn.BatchNorm2d(planes * self.expansion, + momentum=BN_MOMENTUM) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class HighResolutionModule(nn.Module): + def __init__(self, num_branches, blocks, num_blocks, num_inchannels, + num_channels, fuse_method, multi_scale_output=True): + super(HighResolutionModule, self).__init__() + self._check_branches( + num_branches, blocks, num_blocks, num_inchannels, num_channels) + + self.num_inchannels = num_inchannels + self.fuse_method = fuse_method + self.num_branches = num_branches + + self.multi_scale_output = multi_scale_output + + self.branches = self._make_branches( + num_branches, blocks, num_blocks, num_channels) + self.fuse_layers = self._make_fuse_layers() + self.relu = nn.ReLU(False) + + def _check_branches(self, num_branches, blocks, num_blocks, + num_inchannels, num_channels): + if num_branches != len(num_blocks): + error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format( + num_branches, len(num_blocks)) + logger.error(error_msg) + raise ValueError(error_msg) + + if 
num_branches != len(num_channels): + error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format( + num_branches, len(num_channels)) + logger.error(error_msg) + raise ValueError(error_msg) + + if num_branches != len(num_inchannels): + error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format( + num_branches, len(num_inchannels)) + logger.error(error_msg) + raise ValueError(error_msg) + + def _make_one_branch(self, branch_index, block, num_blocks, num_channels, + stride=1): + downsample = None + if stride != 1 or \ + self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.num_inchannels[branch_index], + num_channels[branch_index] * block.expansion, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(num_channels[branch_index] * block.expansion, + momentum=BN_MOMENTUM), + ) + + layers = [] + layers.append(block(self.num_inchannels[branch_index], + num_channels[branch_index], stride, downsample)) + self.num_inchannels[branch_index] = \ + num_channels[branch_index] * block.expansion + for i in range(1, num_blocks[branch_index]): + layers.append(block(self.num_inchannels[branch_index], + num_channels[branch_index])) + + return nn.Sequential(*layers) + + def _make_branches(self, num_branches, block, num_blocks, num_channels): + branches = [] + + for i in range(num_branches): + branches.append( + self._make_one_branch(i, block, num_blocks, num_channels)) + + return nn.ModuleList(branches) + + def _make_fuse_layers(self): + if self.num_branches == 1: + return None + + num_branches = self.num_branches + num_inchannels = self.num_inchannels + fuse_layers = [] + for i in range(num_branches if self.multi_scale_output else 1): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], + num_inchannels[i], + 1, + 1, + 0, + bias=False), + nn.BatchNorm2d(num_inchannels[i], + momentum=BN_MOMENTUM), + nn.Upsample(scale_factor=2**(j-i), mode='nearest'))) + elif j == i: + fuse_layer.append(None) + else: + conv3x3s = [] + for k in range(i-j): + if k == i - j - 1: + num_outchannels_conv3x3 = num_inchannels[i] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], + num_outchannels_conv3x3, + 3, 2, 1, bias=False), + nn.BatchNorm2d(num_outchannels_conv3x3, + momentum=BN_MOMENTUM))) + else: + num_outchannels_conv3x3 = num_inchannels[j] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], + num_outchannels_conv3x3, + 3, 2, 1, bias=False), + nn.BatchNorm2d(num_outchannels_conv3x3, + momentum=BN_MOMENTUM), + nn.ReLU(False))) + fuse_layer.append(nn.Sequential(*conv3x3s)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def get_num_inchannels(self): + return self.num_inchannels + + def forward(self, x): + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i in range(self.num_branches): + x[i] = self.branches[i](x[i]) + + x_fuse = [] + for i in range(len(self.fuse_layers)): + y = x[0] if i == 0 else self.fuse_layers[i][0](x[0]) + for j in range(1, self.num_branches): + if i == j: + y = y + x[j] + else: + y = y + self.fuse_layers[i][j](x[j]) + x_fuse.append(self.relu(y)) + + return x_fuse + + +blocks_dict = { + 'BASIC': BasicBlock, + 'BOTTLENECK': Bottleneck +} + + +class HighResolutionNet(nn.Module): + + def __init__(self, cfg): + super(HighResolutionNet, self).__init__() + + self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, + bias=False) + self.bn1 = nn.BatchNorm2d(64, 
momentum=BN_MOMENTUM) + self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, + bias=False) + self.bn2 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM) + self.relu = nn.ReLU(inplace=True) + + self.stage1_cfg = cfg['MODEL']['EXTRA']['STAGE1'] + num_channels = self.stage1_cfg['NUM_CHANNELS'][0] + block = blocks_dict[self.stage1_cfg['BLOCK']] + num_blocks = self.stage1_cfg['NUM_BLOCKS'][0] + self.layer1 = self._make_layer(block, 64, num_channels, num_blocks) + stage1_out_channel = block.expansion*num_channels + + self.stage2_cfg = cfg['MODEL']['EXTRA']['STAGE2'] + num_channels = self.stage2_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage2_cfg['BLOCK']] + num_channels = [ + num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition1 = self._make_transition_layer( + [stage1_out_channel], num_channels) + self.stage2, pre_stage_channels = self._make_stage( + self.stage2_cfg, num_channels) + + self.stage3_cfg = cfg['MODEL']['EXTRA']['STAGE3'] + num_channels = self.stage3_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage3_cfg['BLOCK']] + num_channels = [ + num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition2 = self._make_transition_layer( + pre_stage_channels, num_channels) + self.stage3, pre_stage_channels = self._make_stage( + self.stage3_cfg, num_channels) + + self.stage4_cfg = cfg['MODEL']['EXTRA']['STAGE4'] + num_channels = self.stage4_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage4_cfg['BLOCK']] + num_channels = [ + num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition3 = self._make_transition_layer( + pre_stage_channels, num_channels) + self.stage4, pre_stage_channels = self._make_stage( + self.stage4_cfg, num_channels, multi_scale_output=True) + + # Classification Head + self.incre_modules, self.downsamp_modules, \ + self.final_layer = self._make_head(pre_stage_channels) + + self.fc = nn.Linear(2048, 1000) + + + def _make_head(self, pre_stage_channels): + head_block = Bottleneck + head_channels = [32, 64, 128, 256] + + # Increasing the #channels on each resolution + # from C, 2C, 4C, 8C to 128, 256, 512, 1024 + incre_modules = [] + for i, channels in enumerate(pre_stage_channels): + incre_module = self._make_layer(head_block, + channels, + head_channels[i], + 1, + stride=1) + incre_modules.append(incre_module) + incre_modules = nn.ModuleList(incre_modules) + + # downsampling modules + downsamp_modules = [] + for i in range(len(pre_stage_channels)-1): + in_channels = head_channels[i] * head_block.expansion + out_channels = head_channels[i+1] * head_block.expansion + + downsamp_module = nn.Sequential( + nn.Conv2d(in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=2, + padding=1), + nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM), + nn.ReLU(inplace=True) + ) + + downsamp_modules.append(downsamp_module) + downsamp_modules = nn.ModuleList(downsamp_modules) + + final_layer = nn.Sequential( + nn.Conv2d( + in_channels=head_channels[3] * head_block.expansion, + out_channels=2048, + kernel_size=1, + stride=1, + padding=0 + ), + nn.BatchNorm2d(2048, momentum=BN_MOMENTUM), + nn.ReLU(inplace=True) + ) + + return incre_modules, downsamp_modules, final_layer + + def _make_transition_layer( + self, num_channels_pre_layer, num_channels_cur_layer): + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + if 
num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append(nn.Sequential( + nn.Conv2d(num_channels_pre_layer[i], + num_channels_cur_layer[i], + 3, + 1, + 1, + bias=False), + nn.BatchNorm2d( + num_channels_cur_layer[i], momentum=BN_MOMENTUM), + nn.ReLU(inplace=True))) + else: + transition_layers.append(None) + else: + conv3x3s = [] + for j in range(i+1-num_branches_pre): + inchannels = num_channels_pre_layer[-1] + outchannels = num_channels_cur_layer[i] \ + if j == i-num_branches_pre else inchannels + conv3x3s.append(nn.Sequential( + nn.Conv2d( + inchannels, outchannels, 3, 2, 1, bias=False), + nn.BatchNorm2d(outchannels, momentum=BN_MOMENTUM), + nn.ReLU(inplace=True))) + transition_layers.append(nn.Sequential(*conv3x3s)) + + return nn.ModuleList(transition_layers) + + def _make_layer(self, block, inplanes, planes, blocks, stride=1): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(inplanes, planes * block.expansion, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM), + ) + + layers = [] + layers.append(block(inplanes, planes, stride, downsample)) + inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(inplanes, planes)) + + return nn.Sequential(*layers) + + def _make_stage(self, layer_config, num_inchannels, + multi_scale_output=True): + num_modules = layer_config['NUM_MODULES'] + num_branches = layer_config['NUM_BRANCHES'] + num_blocks = layer_config['NUM_BLOCKS'] + num_channels = layer_config['NUM_CHANNELS'] + block = blocks_dict[layer_config['BLOCK']] + fuse_method = layer_config['FUSE_METHOD'] + + modules = [] + for i in range(num_modules): + # multi_scale_output is only used last module + if not multi_scale_output and i == num_modules - 1: + reset_multi_scale_output = False + else: + reset_multi_scale_output = True + + modules.append( + HighResolutionModule(num_branches, + block, + num_blocks, + num_inchannels, + num_channels, + fuse_method, + reset_multi_scale_output) + ) + num_inchannels = modules[-1].get_num_inchannels() + + return nn.Sequential(*modules), num_inchannels + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + x = self.layer1(x) + + x_list = [] + for i in range(self.stage2_cfg['NUM_BRANCHES']): + if self.transition1[i] is not None: + x_list.append(self.transition1[i](x)) + else: + x_list.append(x) + y_list = self.stage2(x_list) + + x_list = [] + for i in range(self.stage3_cfg['NUM_BRANCHES']): + if self.transition2[i] is not None: + x_list.append(self.transition2[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage3(x_list) + + x_list = [] + for i in range(self.stage4_cfg['NUM_BRANCHES']): + if self.transition3[i] is not None: + x_list.append(self.transition3[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage4(x_list) + + # Classification Head + y = self.incre_modules[0](y_list[0]) + for i in range(len(self.downsamp_modules)): + y = self.incre_modules[i+1](y_list[i+1]) + \ + self.downsamp_modules[i](y) + + y = self.final_layer(y) + + if torch._C._get_tracing_state(): + y = y.flatten(start_dim=2).mean(dim=2) + else: + y = F.avg_pool2d(y, kernel_size=y.size() + [2:]).view(y.size(0), -1) + + y = self.fc(y) + + return y + + def features(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + 
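+        # Stem: two stride-2 3x3 convolutions reduce the spatial resolution
+        # by 4x before the multi-resolution HRNet stages.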
x = self.layer1(x) + + x_list = [] + for i in range(self.stage2_cfg['NUM_BRANCHES']): + if self.transition1[i] is not None: + x_list.append(self.transition1[i](x)) + else: + x_list.append(x) + y_list = self.stage2(x_list) + + x_list = [] + for i in range(self.stage3_cfg['NUM_BRANCHES']): + if self.transition2[i] is not None: + x_list.append(self.transition2[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage3(x_list) + + x_list = [] + for i in range(self.stage4_cfg['NUM_BRANCHES']): + if self.transition3[i] is not None: + x_list.append(self.transition3[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage4(x_list) + + # Upsampling + x0, x1, x2, x3 = y_list + x0_h, x0_w = x0.size(2), x0.size(3) + x1 = F.upsample(x1, size=(x0_h, x0_w), mode='bilinear') + x2 = F.upsample(x2, size=(x0_h, x0_w), mode='bilinear') + x3 = F.upsample(x3, size=(x0_h, x0_w), mode='bilinear') + + x_out = torch.cat([x0, x1, x2, x3], 1) + + #print(x_out.size()) + + return x_out + + def classifier(self, x): + # Classification Head + y = self.incre_modules[0](x[0]) + for i in range(len(self.downsamp_modules)): + y = self.incre_modules[i+1](x[i+1]) + \ + self.downsamp_modules[i](y) + + y = self.final_layer(y) + + if torch._C._get_tracing_state(): + y = y.flatten(start_dim=2).mean(dim=2) + else: + y = F.avg_pool2d(y, kernel_size=y.size() + [2:]).view(y.size(0), -1) + + y = self.fc(y) + +def get_cls_net(config, **kwargs): + model = HighResolutionNet(config, **kwargs) + return model diff --git a/training/networks/efficientnetb4.py b/training/networks/efficientnetb4.py new file mode 100644 index 0000000000000000000000000000000000000000..e2c2ea71ffa9d6c1aa259742d0f10345aff470b6 --- /dev/null +++ b/training/networks/efficientnetb4.py @@ -0,0 +1,112 @@ +''' +# author: Zhiyuan Yan +# email: zhiyuanyan@link.cuhk.edu.cn +# date: 2023-0706 + +The code is for EfficientNetB4 backbone. 
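+It is registered in BACKBONE under the name "efficientnetb4" and is constructed
+from a config dict with the keys num_classes, inc, dropout, mode and pretrained.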
+''' + +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Union +from efficientnet_pytorch import EfficientNet +from metrics.registry import BACKBONE +import os + +@BACKBONE.register_module(module_name="efficientnetb4") +class EfficientNetB4(nn.Module): + def __init__(self, efficientnetb4_config): + super(EfficientNetB4, self).__init__() + """ Constructor + Args: + efficientnetb4_config: configuration file with the dict format + """ + self.num_classes = efficientnetb4_config["num_classes"] + inc = efficientnetb4_config["inc"] + self.dropout = efficientnetb4_config["dropout"] + self.mode = efficientnetb4_config["mode"] + + # Load the EfficientNet-B4 model without pre-trained weights + if efficientnetb4_config['pretrained']: + self.efficientnet = EfficientNet.from_pretrained('efficientnet-b4',weights_path=efficientnetb4_config['pretrained']) # FIXME: load the pretrained weights from online + # self.efficientnet = EfficientNet.from_name('efficientnet-b4') + else: + self.efficientnet = EfficientNet.from_name('efficientnet-b4') + # Modify the first convolutional layer to accept input tensors with 'inc' channels + self.efficientnet._conv_stem = nn.Conv2d(inc, 48, kernel_size=3, stride=2, bias=False) + + # Remove the last layer (the classifier) from the EfficientNet-B4 model + self.efficientnet._fc = nn.Identity() + + if self.dropout: + # Add dropout layer if specified + self.dropout_layer = nn.Dropout(p=self.dropout) + + # Initialize the last_layer layer + self.last_layer = nn.Linear(1792, self.num_classes) + + if self.mode == 'adjust_channel': + self.adjust_channel = nn.Sequential( + nn.Conv2d(1792, 512, 1, 1), + nn.BatchNorm2d(512), + nn.ReLU(inplace=True), + ) + + def block_part1(self,x): + x = self.efficientnet._swish(self.efficientnet._bn0(self.efficientnet._conv_stem(x))) + # x = self.efficientnet._blocks[0:10](x) + for idx, block in enumerate(self.efficientnet._blocks[:10]): + drop_connect_rate = self.efficientnet._global_params.drop_connect_rate + if drop_connect_rate: + drop_connect_rate *= float(idx+0) / len(self.efficientnet._blocks) # scale drop connect_rate + x = block(x, drop_connect_rate=drop_connect_rate) + return x + + def block_part2(self,x): + for idx, block in enumerate(self.efficientnet._blocks[10:22]): + drop_connect_rate = self.efficientnet._global_params.drop_connect_rate + if drop_connect_rate: + drop_connect_rate *= float(idx+10) / len(self.efficientnet._blocks) # scale drop connect_rate + x = block(x, drop_connect_rate=drop_connect_rate) + return x + + def block_part3(self,x): + for idx, block in enumerate(self.efficientnet._blocks[22:]): + drop_connect_rate = self.efficientnet._global_params.drop_connect_rate + if drop_connect_rate: + drop_connect_rate *= float(idx+22) / len(self.efficientnet._blocks) # scale drop connect_rate + x = block(x, drop_connect_rate=drop_connect_rate) + x = self.efficientnet._swish(self.efficientnet._bn1(self.efficientnet._conv_head(x))) + return x + + + def features(self, x): + # Extract features from the EfficientNet-B4 model + x = self.efficientnet.extract_features(x) + if self.mode == 'adjust_channel': + x = self.adjust_channel(x) + return x + def end_points(self,x): + return self.efficientnet.extract_endpoints(x) + def classifier(self, x): + x = F.adaptive_avg_pool2d(x, (1, 1)) + x = x.view(x.size(0), -1) + + # Apply dropout if specified + if self.dropout: + x = self.dropout_layer(x) + + # Apply last_layer layer + self.last_emb = x + y = self.last_layer(x) + return y + + def forward(self, x): + # 
Extract features and apply classifier layer + x = self.features(x) + # if False: + # x = F.adaptive_avg_pool2d(x, (1, 1)) + # x = x.view(x.size(0), -1) + x = self.classifier(x) + return x diff --git a/training/networks/iresnet.py b/training/networks/iresnet.py new file mode 100644 index 0000000000000000000000000000000000000000..12a454cb9acc227b8968806cf3c47c4145e6304b --- /dev/null +++ b/training/networks/iresnet.py @@ -0,0 +1,191 @@ +import torch +from torch import nn +import torch.nn.functional as F + +__all__ = ['iresnet18', 'iresnet34', 'iresnet50', 'iresnet100', 'iresnet200'] + +def set_requires_grad(model, val): + for p in model.parameters(): + p.requires_grad = val + +def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, + out_planes, + kernel_size=3, + stride=stride, + padding=dilation, + groups=groups, + bias=False, + dilation=dilation) + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, + out_planes, + kernel_size=1, + stride=stride, + bias=False) + + +class IBasicBlock(nn.Module): + expansion = 1 + def __init__(self, inplanes, planes, stride=1, downsample=None, + groups=1, base_width=64, dilation=1): + super(IBasicBlock, self).__init__() + if groups != 1 or base_width != 64: + raise ValueError('BasicBlock only supports groups=1 and base_width=64') + if dilation > 1: + raise NotImplementedError("Dilation > 1 not supported in BasicBlock") + self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05,) + self.conv1 = conv3x3(inplanes, planes) + self.bn2 = nn.BatchNorm2d(planes, eps=1e-05,) + self.prelu = nn.PReLU(planes) + self.conv2 = conv3x3(planes, planes, stride) + self.bn3 = nn.BatchNorm2d(planes, eps=1e-05,) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + out = self.bn1(x) + out = self.conv1(out) + out = self.bn2(out) + out = self.prelu(out) + out = self.conv2(out) + out = self.bn3(out) + if self.downsample is not None: + identity = self.downsample(x) + out += identity + return out + + +class IResNet(nn.Module): + def __init__(self, + block, layers, dropout=0, num_features=512, zero_init_residual=False, + groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False, fc_scale=7*7): + super(IResNet, self).__init__() + self.fp16 = fp16 + self.inplanes = 64 + self.dilation = 1 + if replace_stride_with_dilation is None: + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05) + self.prelu = nn.PReLU(self.inplanes) + self.layer1 = self._make_layer(block, 64, layers[0], stride=2) + self.layer2 = self._make_layer(block, + 128, + layers[1], + stride=2, + dilate=replace_stride_with_dilation[0]) + + self.layer3 = self._make_layer(block, + 256, + layers[2], + stride=2, + dilate=replace_stride_with_dilation[1]) + set_requires_grad(self.layer1, False) + set_requires_grad(self.layer2, False) + set_requires_grad(self.layer3, False) + self.layer4 = self._make_layer(block, + 512, + layers[3], + stride=2, + dilate=replace_stride_with_dilation[2]) + self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05,) + self.dropout = 
nn.Dropout(p=dropout, inplace=True) + self.fc = nn.Linear(512 * block.expansion * fc_scale, num_features) + self.features = nn.BatchNorm1d(num_features, eps=1e-05) + nn.init.constant_(self.features.weight, 1.0) + self.features.weight.requires_grad = False + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.normal_(m.weight, 0, 0.1) + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + if zero_init_residual: + for m in self.modules(): + if isinstance(m, IBasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, dilate=False): + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + nn.BatchNorm2d(planes * block.expansion, eps=1e-05, ), + ) + layers = [] + layers.append( + block(self.inplanes, planes, stride, downsample, self.groups, + self.base_width, previous_dilation)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append( + block(self.inplanes, + planes, + groups=self.groups, + base_width=self.base_width, + dilation=self.dilation)) + + return nn.Sequential(*layers) + + def forward(self, x): + with torch.cuda.amp.autocast(self.fp16): + x = self.conv1(x) + x = self.bn1(x) + x = self.prelu(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + x = self.bn2(x) + x = self.dropout(x) + x = F.avg_pool2d(x, kernel_size=2, stride=2, padding=0) + return x + + +def _iresnet(arch, block, layers, pretrained, progress, **kwargs): + model = IResNet(block, layers, **kwargs) + if pretrained: + raise ValueError() + return model + + +def iresnet18(pretrained=False, progress=True, **kwargs): + return _iresnet('iresnet18', IBasicBlock, [2, 2, 2, 2], pretrained, + progress, **kwargs) + + +def iresnet34(pretrained=False, progress=True, **kwargs): + return _iresnet('iresnet34', IBasicBlock, [3, 4, 6, 3], pretrained, + progress, **kwargs) + + +def iresnet50(pretrained=False, progress=True, **kwargs): + return _iresnet('iresnet50', IBasicBlock, [3, 4, 14, 3], pretrained, + progress, **kwargs) + + +def iresnet100(pretrained=False, progress=True, **kwargs): + return _iresnet('iresnet100', IBasicBlock, [3, 13, 30, 3], pretrained, + progress, **kwargs) + + +def iresnet200(pretrained=False, progress=True, **kwargs): + return _iresnet('iresnet200', IBasicBlock, [6, 26, 60, 6], pretrained, + progress, **kwargs) diff --git a/training/networks/iresnet_iid.py b/training/networks/iresnet_iid.py new file mode 100644 index 0000000000000000000000000000000000000000..5776ad2f64a6c19c376adde5e5a685d8241c5011 --- /dev/null +++ b/training/networks/iresnet_iid.py @@ -0,0 +1,196 @@ +import torch +from torch import nn +from torch.utils.checkpoint import checkpoint + + +__all__ = ['iresnet18', 'iresnet34', 'iresnet50', 'iresnet100', 'iresnet200'] +using_ckpt = False + +def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, + out_planes, + kernel_size=3, + stride=stride, + padding=dilation, + groups=groups, + bias=False, + dilation=dilation) + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, + out_planes, + kernel_size=1, + stride=stride, + bias=False) + + +class IBasicBlock(nn.Module): + expansion = 1 
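+    # Pre-activation residual block (BN -> conv -> BN -> PReLU -> conv -> BN);
+    # forward() optionally wraps it in torch.utils.checkpoint when the
+    # module-level using_ckpt flag is set and the model is in training mode.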
+ def __init__(self, inplanes, planes, stride=1, downsample=None, + groups=1, base_width=64, dilation=1): + super(IBasicBlock, self).__init__() + if groups != 1 or base_width != 64: + raise ValueError('BasicBlock only supports groups=1 and base_width=64') + if dilation > 1: + raise NotImplementedError("Dilation > 1 not supported in BasicBlock") + self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05,) + self.conv1 = conv3x3(inplanes, planes) + self.bn2 = nn.BatchNorm2d(planes, eps=1e-05,) + self.prelu = nn.PReLU(planes) + self.conv2 = conv3x3(planes, planes, stride) + self.bn3 = nn.BatchNorm2d(planes, eps=1e-05,) + self.downsample = downsample + self.stride = stride + + def forward_impl(self, x): + identity = x + out = self.bn1(x) + out = self.conv1(out) + out = self.bn2(out) + out = self.prelu(out) + out = self.conv2(out) + out = self.bn3(out) + if self.downsample is not None: + identity = self.downsample(x) + out += identity + return out + + def forward(self, x): + if self.training and using_ckpt: + return checkpoint(self.forward_impl, x) + else: + return self.forward_impl(x) + + +class IResNet(nn.Module): + fc_scale = 7 * 7 + def __init__(self, + block, layers, dropout=0, num_features=512, zero_init_residual=False, + groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False): + super(IResNet, self).__init__() + self.extra_gflops = 0.0 + self.fp16 = fp16 + self.inplanes = 64 + self.dilation = 1 + if replace_stride_with_dilation is None: + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05) + self.prelu = nn.PReLU(self.inplanes) + self.layer1 = self._make_layer(block, 64, layers[0], stride=2) + self.layer2 = self._make_layer(block, + 128, + layers[1], + stride=2, + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, + 256, + layers[2], + stride=2, + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, + 512, + layers[3], + stride=2, + dilate=replace_stride_with_dilation[2]) + self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05,) + self.dropout = nn.Dropout(p=dropout, inplace=True) + self.fc = nn.Linear(512 * block.expansion * self.fc_scale, num_features) + self.features = nn.BatchNorm1d(num_features, eps=1e-05) + nn.init.constant_(self.features.weight, 1.0) + self.features.weight.requires_grad = False + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.normal_(m.weight, 0, 0.1) + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + if zero_init_residual: + for m in self.modules(): + if isinstance(m, IBasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, dilate=False): + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + nn.BatchNorm2d(planes * block.expansion, eps=1e-05, ), + ) + layers = [] + layers.append( + block(self.inplanes, planes, stride, downsample, self.groups, + self.base_width, 
previous_dilation)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append( + block(self.inplanes, + planes, + groups=self.groups, + base_width=self.base_width, + dilation=self.dilation)) + + return nn.Sequential(*layers) + + def forward(self, x): + with torch.cuda.amp.autocast(self.fp16): + x = self.conv1(x) + x = self.bn1(x) + x = self.prelu(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + x = self.bn2(x) + x = torch.flatten(x, 1) + x = self.dropout(x) + x = self.fc(x.float() if self.fp16 else x) + if x.size(0)>1: + x = self.features(x) + return x + + +def _iresnet(arch, block, layers, pretrained, progress, **kwargs): + model = IResNet(block, layers, **kwargs) + if pretrained: + raise ValueError() + return model + + +def iresnet18(pretrained=False, progress=True, **kwargs): + return _iresnet('iresnet18', IBasicBlock, [2, 2, 2, 2], pretrained, + progress, **kwargs) + + +def iresnet34(pretrained=False, progress=True, **kwargs): + return _iresnet('iresnet34', IBasicBlock, [3, 4, 6, 3], pretrained, + progress, **kwargs) + + +def iresnet50(pretrained=False, progress=True, **kwargs): + return _iresnet('iresnet50', IBasicBlock, [3, 4, 14, 3], pretrained, + progress, **kwargs) + + +def iresnet100(pretrained=False, progress=True, **kwargs): + return _iresnet('iresnet100', IBasicBlock, [3, 13, 30, 3], pretrained, + progress, **kwargs) + + +def iresnet200(pretrained=False, progress=True, **kwargs): + return _iresnet('iresnet200', IBasicBlock, [6, 26, 60, 6], pretrained, + progress, **kwargs) \ No newline at end of file diff --git a/training/networks/mesonet.py b/training/networks/mesonet.py new file mode 100644 index 0000000000000000000000000000000000000000..07429325acfaad62cc11825dfcc998b52f538b60 --- /dev/null +++ b/training/networks/mesonet.py @@ -0,0 +1,189 @@ +''' +# author: Zhiyuan Yan +# email: zhiyuanyan@link.cuhk.edu.cn +# date: 2023-0706 + +The code is mainly modified from the below link: +https://github.com/HongguLiu/MesoNet-Pytorch +''' + +import os +import argparse +import logging + +import math +import torch +import torch.nn as nn +import torch.nn.functional as F + +import torch.utils.model_zoo as model_zoo +from torch.nn import init +from typing import Union +from metrics.registry import BACKBONE + +logger = logging.getLogger(__name__) + +@BACKBONE.register_module(module_name="meso4") +class Meso4(nn.Module): + def __init__(self, meso4_config): + super(Meso4, self).__init__() + self.num_classes = meso4_config["num_classes"] + inc = meso4_config["inc"] + self.conv1 = nn.Conv2d(inc, 8, 3, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(8) + self.relu = nn.ReLU(inplace=True) + self.leakyrelu = nn.LeakyReLU(0.1) + + self.conv2 = nn.Conv2d(8, 8, 5, padding=2, bias=False) + self.bn2 = nn.BatchNorm2d(16) + self.conv3 = nn.Conv2d(8, 16, 5, padding=2, bias=False) + self.conv4 = nn.Conv2d(16, 16, 5, padding=2, bias=False) + self.maxpooling1 = nn.MaxPool2d(kernel_size=(2, 2)) + self.maxpooling2 = nn.MaxPool2d(kernel_size=(4, 4)) + #flatten: x = x.view(x.size(0), -1) + self.dropout = nn.Dropout2d(0.5) + self.fc1 = nn.Linear(16*8*8, 16) + self.fc2 = nn.Linear(16, self.num_classes) + + + def features(self, input): + x = self.conv1(input) #(8, 256, 256) + x = self.relu(x) + x = self.bn1(x) + x = self.maxpooling1(x) #(8, 128, 128) + + x = self.conv2(x) #(8, 128, 128) + x = self.relu(x) + x = self.bn1(x) + x = self.maxpooling1(x) #(8, 64, 64) + + x = self.conv3(x) #(16, 64, 64) + x = self.relu(x) + x = self.bn2(x) + x = 
self.maxpooling1(x) #(16, 32, 32) + + x = self.conv4(x) #(16, 32, 32) + x = self.relu(x) + x = self.bn2(x) + x = self.maxpooling2(x) #(16, 8, 8) + x = x.view(x.size(0), -1) #(Batch, 16*8*8) + + return x + + def classifier(self, feature): + out = self.dropout(feature) + out = self.fc1(out) #(Batch, 16) + out = self.leakyrelu(out) + out = self.dropout(out) + out = self.fc2(out) + return out + + def forward(self, input): + x = self.features(input) + out = self.classifier(x) + return out, x + + +@BACKBONE.register_module(module_name="meso4Inception") +class MesoInception4(nn.Module): + def __init__(self, mesoInception4_config): + super(MesoInception4, self).__init__() + self.num_classes = mesoInception4_config["num_classes"] + inc = mesoInception4_config["inc"] + #InceptionLayer1 + self.Incption1_conv1 = nn.Conv2d(3, 1, 1, padding=0, bias=False) + self.Incption1_conv2_1 = nn.Conv2d(3, 4, 1, padding=0, bias=False) + self.Incption1_conv2_2 = nn.Conv2d(4, 4, 3, padding=1, bias=False) + self.Incption1_conv3_1 = nn.Conv2d(3, 4, 1, padding=0, bias=False) + self.Incption1_conv3_2 = nn.Conv2d(4, 4, 3, padding=2, dilation=2, bias=False) + self.Incption1_conv4_1 = nn.Conv2d(3, 2, 1, padding=0, bias=False) + self.Incption1_conv4_2 = nn.Conv2d(2, 2, 3, padding=3, dilation=3, bias=False) + self.Incption1_bn = nn.BatchNorm2d(11) + + + #InceptionLayer2 + self.Incption2_conv1 = nn.Conv2d(11, 2, 1, padding=0, bias=False) + self.Incption2_conv2_1 = nn.Conv2d(11, 4, 1, padding=0, bias=False) + self.Incption2_conv2_2 = nn.Conv2d(4, 4, 3, padding=1, bias=False) + self.Incption2_conv3_1 = nn.Conv2d(11, 4, 1, padding=0, bias=False) + self.Incption2_conv3_2 = nn.Conv2d(4, 4, 3, padding=2, dilation=2, bias=False) + self.Incption2_conv4_1 = nn.Conv2d(11, 2, 1, padding=0, bias=False) + self.Incption2_conv4_2 = nn.Conv2d(2, 2, 3, padding=3, dilation=3, bias=False) + self.Incption2_bn = nn.BatchNorm2d(12) + + #Normal Layer + self.conv1 = nn.Conv2d(12, 16, 5, padding=2, bias=False) + self.relu = nn.ReLU(inplace=True) + self.leakyrelu = nn.LeakyReLU(0.1) + self.bn1 = nn.BatchNorm2d(16) + self.maxpooling1 = nn.MaxPool2d(kernel_size=(2, 2)) + + self.conv2 = nn.Conv2d(16, 16, 5, padding=2, bias=False) + self.maxpooling2 = nn.MaxPool2d(kernel_size=(4, 4)) + + self.dropout = nn.Dropout2d(0.5) + self.fc1 = nn.Linear(16*8*8, 16) + self.fc2 = nn.Linear(16, self.num_classes) + + + #InceptionLayer + def InceptionLayer1(self, input): + x1 = self.Incption1_conv1(input) + x2 = self.Incption1_conv2_1(input) + x2 = self.Incption1_conv2_2(x2) + x3 = self.Incption1_conv3_1(input) + x3 = self.Incption1_conv3_2(x3) + x4 = self.Incption1_conv4_1(input) + x4 = self.Incption1_conv4_2(x4) + y = torch.cat((x1, x2, x3, x4), 1) + y = self.Incption1_bn(y) + y = self.maxpooling1(y) + + return y + + def InceptionLayer2(self, input): + x1 = self.Incption2_conv1(input) + x2 = self.Incption2_conv2_1(input) + x2 = self.Incption2_conv2_2(x2) + x3 = self.Incption2_conv3_1(input) + x3 = self.Incption2_conv3_2(x3) + x4 = self.Incption2_conv4_1(input) + x4 = self.Incption2_conv4_2(x4) + y = torch.cat((x1, x2, x3, x4), 1) + y = self.Incption2_bn(y) + y = self.maxpooling1(y) + + return y + + + def features(self, input): + x = self.InceptionLayer1(input) #(Batch, 11, 128, 128) + x = self.InceptionLayer2(x) #(Batch, 12, 64, 64) + + x = self.conv1(x) #(Batch, 16, 64 ,64) + x = self.relu(x) + x = self.bn1(x) + x = self.maxpooling1(x) #(Batch, 16, 32, 32) + + x = self.conv2(x) #(Batch, 16, 32, 32) + x = self.relu(x) + x = self.bn1(x) + x = self.maxpooling2(x) #(Batch, 
16, 8, 8) + + x = x.view(x.size(0), -1) #(Batch, 16*8*8) + + return x + + def classifier(self, feature): + + out = self.dropout(feature) + out = self.fc1(out) #(Batch, 16) + out = self.leakyrelu(out) + out = self.dropout(out) + out = self.fc2(out) + return out + + def forward(self, input): + x = self.features(input) + out = self.classifier(x) + return out, x diff --git a/training/networks/resnet.py b/training/networks/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..64d649ab427b06444a9b4b17c53869640ebbe2b6 --- /dev/null +++ b/training/networks/resnet.py @@ -0,0 +1,501 @@ +# -*- coding: utf-8 -*- +""" +Created on 18-5-21 下午5:26 + +@author: ronghuaiyang +""" +import torch +import torch.nn as nn +import math +import torch.utils.model_zoo as model_zoo +import torch.nn.utils.weight_norm as weight_norm +import torch.nn.functional as F + + +# __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', +# 'resnet152'] + + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', +} + + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=False) + + +class AdaIN(nn.Module): + def __init__(self, eps=1e-5): + super().__init__() + self.eps = eps + # self.l1 = nn.Linear(num_classes, in_channel*4, bias=True) #bias is good :) + + def c_norm(self, x, bs, ch, eps=1e-7): + # assert isinstance(x, torch.cuda.FloatTensor) + x_var = x.var(dim=-1) + eps + x_std = x_var.sqrt().view(bs, ch, 1, 1) + x_mean = x.mean(dim=-1).view(bs, ch, 1, 1) + return x_std, x_mean + + def forward(self, x, y): + assert x.size(0)==y.size(0) + size = x.size() + bs, ch = size[:2] + x_ = x.view(bs, ch, -1) + y_ = y.reshape(bs, ch, -1) + x_std, x_mean = self.c_norm(x_, bs, ch, eps=self.eps) + y_std, y_mean = self.c_norm(y_, bs, ch, eps=self.eps) + out = ((x - x_mean.expand(size)) / x_std.expand(size)) \ + * y_std.expand(size) + y_mean.expand(size) + return out + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class BasicBlock_adain(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock_adain, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.adain1 = AdaIN() + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.adain2 = AdaIN() + self.downsample = downsample + self.stride = stride + + def forward(self, feat): # x is content, c is style + x, c = feat + residual = x + + x = self.conv1(x) + out = 
self.adain1(x, c) + out = self.relu(out) + + out = self.conv2(out) + out = self.adain2(out, c) + + if self.downsample is not None: + residual = self.downsample(residual) + + out += residual + out = self.relu(out) + + return (out, c) + + +class IRBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True): + super(IRBlock, self).__init__() + self.bn0 = nn.BatchNorm2d(inplanes) + self.conv1 = conv3x3(inplanes, inplanes) + self.bn1 = nn.BatchNorm2d(inplanes) + self.prelu = nn.PReLU() + self.conv2 = conv3x3(inplanes, planes, stride) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + self.use_se = use_se + if self.use_se: + self.se = SEBlock(planes) + + def forward(self, x): + residual = x + out = self.bn0(x) + out = self.conv1(out) + out = self.bn1(out) + out = self.prelu(out) + + out = self.conv2(out) + out = self.bn2(out) + if self.use_se: + out = self.se(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.prelu(out) + + return out + + +class IRBlock_3conv(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True): + super(IRBlock_3conv, self).__init__() + self.bn0 = nn.BatchNorm2d(inplanes) + self.conv1 = conv3x3(inplanes, inplanes) + self.bn1 = nn.BatchNorm2d(inplanes) + self.prelu1 = nn.PReLU() + self.conv2 = conv3x3(inplanes, planes, stride) + self.bn2 = nn.BatchNorm2d(planes) + self.prelu2 = nn.PReLU() + self.conv3 = conv3x3(planes, planes) + self.bn3 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + self.use_se = use_se + if self.use_se: + self.se = SEBlock(planes) + self.prelu = nn.PReLU() + + def forward(self, x): + residual = x + out = self.bn0(x) + out = self.conv1(out) + out = self.bn1(out) + out = self.prelu1(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.prelu2(out) + + out = self.conv3(out) + out = self.bn3(out) + if self.use_se: + out = self.se(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.prelu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, + padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d( + planes, planes * self.expansion, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class SEBlock(nn.Module): + def __init__(self, channel, reduction=16): + super(SEBlock, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Sequential( + nn.Linear(channel, channel // reduction), + nn.PReLU(), + nn.Linear(channel // reduction, channel), + nn.Sigmoid() + ) + + def forward(self, x): + b, c, _, _ = x.size() + y = self.avg_pool(x).view(b, c) + y = 
self.fc(y).view(b, c, 1, 1) + return x * y + + +class ResNetFace(nn.Module): + def __init__(self, block, layers, use_se=True, inc=3): + self.inplanes = 64 + self.use_se = use_se + super(ResNetFace, self).__init__() + self.conv1 = nn.Conv2d(inc, 64, kernel_size=3, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.prelu = nn.PReLU() + self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2) + self.bn4 = nn.BatchNorm2d(512) + #self.dropout = nn.Dropout() + self.fc5 = nn.Linear(512 * 8 * 8, 512) + #self.bn5 = nn.BatchNorm1d(512) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.xavier_normal_(m.weight) + elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.xavier_normal_(m.weight) + nn.init.constant_(m.bias, 0) + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes, planes * block.expansion, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(planes * block.expansion), + ) + layers = [] + layers.append(block(self.inplanes, planes, stride, + downsample, use_se=self.use_se)) + self.inplanes = planes + for i in range(1, blocks): + layers.append(block(self.inplanes, planes, use_se=self.use_se)) + + return nn.Sequential(*layers) + + def features(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.prelu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + x = self.bn4(x) + + return x + + def classifier(self, x): + x = x.view(x.size(0), -1) + x = self.fc5(x) + + return x + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.prelu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + x = self.bn4(x) + #x = self.dropout(x) + x = x.view(x.size(0), -1) + x = self.fc5(x) + #x = self.bn5(x) + + return x + + +class ResNet(nn.Module): + + def __init__(self, block, layers, basedim=32, inc=1): + self.inplanes = basedim + super(ResNet, self).__init__() + # self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, + # bias=False) + self.conv1 = nn.Conv2d(inc, self.inplanes, kernel_size=3, stride=1, padding=1, + bias=False) + self.bn1 = nn.BatchNorm2d(self.inplanes) + self.relu = nn.ReLU(inplace=True) + # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, basedim, layers[0], stride=2) + self.layer2 = self._make_layer(block, 2*basedim, layers[1], stride=2) + self.layer3 = self._make_layer(block, 4*basedim, layers[2], stride=2) + self.layer4 = self._make_layer(block, 8*basedim, layers[3], stride=2) + # self.avgpool = nn.AvgPool2d(8, stride=1) + # self.fc = nn.Linear(512 * block.expansion, num_classes) + self.fc5 = nn.Linear(512 * 8 * 8, 512) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if 
stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes, planes * block.expansion, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def features(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + # x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + return x + + def classifier(self, x): + x = x.view(x.size(0), -1) + x = self.fc5(x) + + return x + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + # x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + # x = nn.AvgPool2d(kernel_size=x.size()[2:])(x) + # x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc5(x) + + return x + + +def resnet18(pretrained=False, **kwargs): + """Constructs a ResNet-18 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) + return model + + +def resnet34(pretrained=False, **kwargs): + """Constructs a ResNet-34 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) + return model + + +def resnet50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) + return model + + +def resnet101(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) + return model + + +def resnet152(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) + return model + + +def resnet_face18(use_se=True, **kwargs): + model = ResNetFace(IRBlock, [2, 2, 2, 2], use_se=use_se, **kwargs) + return model + + +def resnet_face62(use_se=True, **kwargs): + model = ResNetFace(IRBlock_3conv, [3, 4, 10, 3], use_se=use_se, **kwargs) + return model + +if __name__ == "__main__": + net = HR_resnet() + dummy = torch.rand(10,3,256,256) + x = net(dummy) + print('output:', x.size()) diff --git a/training/networks/resnet34.py b/training/networks/resnet34.py new file mode 100644 index 0000000000000000000000000000000000000000..0b6d1d00b49d6c4fbabe832f9d683e71e8bf898e --- /dev/null +++ b/training/networks/resnet34.py @@ -0,0 +1,60 @@ +''' +# author: Zhiyuan Yan +# email: zhiyuanyan@link.cuhk.edu.cn +# date: 2023-0706 + +The code is for ResNet34 backbone. 
+''' + +import os +import logging +from typing import Union +import torch +import torchvision +import torch.nn as nn +import torch.nn.functional as F +from metrics.registry import BACKBONE + +logger = logging.getLogger(__name__) + +@BACKBONE.register_module(module_name="resnet34") +class ResNet34(nn.Module): + def __init__(self, resnet_config): + super(ResNet34, self).__init__() + """ Constructor + Args: + resnet_config: configuration file with the dict format + """ + self.num_classes = resnet_config["num_classes"] + inc = resnet_config["inc"] + self.mode = resnet_config["mode"] + + # Define layers of the backbone + resnet = torchvision.models.resnet34(pretrained=True) # FIXME: download the pretrained weights from online + # resnet.conv1 = nn.Conv2d(inc, 64, kernel_size=7, stride=2, padding=3, bias=False) + self.resnet = torch.nn.Sequential(*list(resnet.children())[:-2]) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(512, self.num_classes) + + if self.mode == 'adjust_channel': + self.adjust_channel = nn.Sequential( + nn.Conv2d(512, 512, 1, 1), + nn.BatchNorm2d(512), + nn.ReLU(inplace=True), + ) + + + def features(self, inp): + x = self.resnet(inp) + return x + + def classifier(self, features): + x = self.avgpool(features) + x = x.view(x.size(0), -1) + x = self.fc(x) + return x + + def forward(self, inp): + x = self.features(inp) + out = self.classifier(x) + return out diff --git a/training/networks/vgg.py b/training/networks/vgg.py new file mode 100644 index 0000000000000000000000000000000000000000..da74f9c6a1ca1a93470cefeb68781ddee05d3460 --- /dev/null +++ b/training/networks/vgg.py @@ -0,0 +1,143 @@ +"""A VGG-based perceptual loss function for PyTorch.""" + +import torch +from torch import nn +from torch.nn import functional as F +from torchvision import models, transforms + + +class Lambda(nn.Module): + """Wraps a callable in an :class:`nn.Module` without registering it.""" + + def __init__(self, func): + super().__init__() + object.__setattr__(self, 'forward', func) + + def extra_repr(self): + return getattr(self.forward, '__name__', type(self.forward).__name__) + '()' + + +class WeightedLoss(nn.ModuleList): + """A weighted combination of multiple loss functions.""" + + def __init__(self, losses, weights, verbose=False): + super().__init__() + for loss in losses: + self.append(loss if isinstance(loss, nn.Module) else Lambda(loss)) + self.weights = weights + self.verbose = verbose + + def _print_losses(self, losses): + for i, loss in enumerate(losses): + print(f'({i}) {type(self[i]).__name__}: {loss.item()}') + + def forward(self, *args, **kwargs): + losses = [] + for loss, weight in zip(self, self.weights): + losses.append(loss(*args, **kwargs) * weight) + if self.verbose: + self._print_losses(losses) + return sum(losses) + + +class TVLoss(nn.Module): + """Total variation loss (Lp penalty on image gradient magnitude). + The input must be 4D. If a target (second parameter) is passed in, it is + ignored. + ``p=1`` yields the vectorial total variation norm. It is a generalization + of the originally proposed (isotropic) 2D total variation norm (see + (see https://en.wikipedia.org/wiki/Total_variation_denoising) for color + images. On images with a single channel it is equal to the 2D TV norm. 
+ ``p=2`` yields a variant that is often used for smoothing out noise in + reconstructions of images from neural network feature maps (see Mahendran + and Vevaldi, "Understanding Deep Image Representations by Inverting + Them", https://arxiv.org/abs/1412.0035) + :attr:`reduction` can be set to ``'mean'``, ``'sum'``, or ``'none'`` + similarly to the loss functions in :mod:`torch.nn`. The default is + ``'mean'``. + """ + + def __init__(self, p, reduction='mean', eps=1e-8): + super().__init__() + if p not in {1, 2}: + raise ValueError('p must be 1 or 2') + if reduction not in {'mean', 'sum', 'none'}: + raise ValueError("reduction must be 'mean', 'sum', or 'none'") + self.p = p + self.reduction = reduction + self.eps = eps + + def forward(self, input, target=None): + input = F.pad(input, (0, 1, 0, 1), 'replicate') + x_diff = input[..., :-1, :-1] - input[..., :-1, 1:] + y_diff = input[..., :-1, :-1] - input[..., 1:, :-1] + diff = x_diff**2 + y_diff**2 + if self.p == 1: + diff = (diff + self.eps).mean(dim=1, keepdims=True).sqrt() + if self.reduction == 'mean': + return diff.mean() + if self.reduction == 'sum': + return diff.sum() + return diff + + +class VGGLoss(nn.Module): + """Computes the VGG perceptual loss between two batches of images. + The input and target must be 4D tensors with three channels + ``(B, 3, H, W)`` and must have equivalent shapes. Pixel values should be + normalized to the range 0–1. + The VGG perceptual loss is the mean squared difference between the features + computed for the input and target at layer :attr:`layer` (default 8, or + ``relu2_2``) of the pretrained model specified by :attr:`model` (either + ``'vgg16'`` (default) or ``'vgg19'``). + If :attr:`shift` is nonzero, a random shift of at most :attr:`shift` + pixels in both height and width will be applied to all images in the input + and target. The shift will only be applied when the loss function is in + training mode, and will not be applied if a precomputed feature map is + supplied as the target. + :attr:`reduction` can be set to ``'mean'``, ``'sum'``, or ``'none'`` + similarly to the loss functions in :mod:`torch.nn`. The default is + ``'mean'``. + :meth:`get_features()` may be used to precompute the features for the + target, to speed up the case where inputs are compared against the same + target over and over. To use the precomputed features, pass them in as + :attr:`target` and set :attr:`target_is_features` to :code:`True`. + Instances of :class:`VGGLoss` must be manually converted to the same + device and dtype as their inputs. 
+ """ + + models = {'vgg16': models.vgg16, 'vgg19': models.vgg19} + + def __init__(self, model='vgg16', layer=8, shift=0, reduction='mean'): + super().__init__() + self.instancenorm = nn.InstanceNorm2d(512, affine=False) + self.shift = shift + self.reduction = reduction + self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + self.model = self.models[model](pretrained=True).features[:layer+1] + self.model.eval() + self.model.requires_grad_(False) + + def get_features(self, input): + return self.model(self.normalize(input)) + + def train(self, mode=True): + self.training = mode + + def forward(self, input, target, target_is_features=False): + if target_is_features: + input_feats = self.get_features(input) + target_feats = target + else: + sep = input.shape[0] + batch = torch.cat([input, target]) + if self.shift and self.training: + padded = F.pad(batch, [self.shift] * 4, mode='replicate') + batch = transforms.RandomCrop(batch.shape[2:])(padded) + feats = self.get_features(batch) + input_feats, target_feats = feats[:sep], feats[sep:] + # input_feats, target_feats = \ + # self.instancenorm(input_feats), \ + # self.instancenorm(target_feats) + return F.mse_loss(input_feats, target_feats, reduction=self.reduction) \ No newline at end of file diff --git a/training/networks/xception.py b/training/networks/xception.py new file mode 100644 index 0000000000000000000000000000000000000000..410345c5e15af8aee77a7ff4e3910967bf2d4fce --- /dev/null +++ b/training/networks/xception.py @@ -0,0 +1,285 @@ +''' +# author: Zhiyuan Yan +# email: zhiyuanyan@link.cuhk.edu.cn +# date: 2023-0706 + +The code is mainly modified from GitHub link below: +https://github.com/ondyari/FaceForensics/blob/master/classification/network/xception.py +''' + +import os +import argparse +import logging + +import math +import torch +# import pretrainedmodels +import torch.nn as nn +import torch.nn.functional as F + +import torch.utils.model_zoo as model_zoo +from torch.nn import init +from typing import Union +from metrics.registry import BACKBONE + +logger = logging.getLogger(__name__) + + + +class SeparableConv2d(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False): + super(SeparableConv2d, self).__init__() + + self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, + stride, padding, dilation, groups=in_channels, bias=bias) + self.pointwise = nn.Conv2d( + in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias) + + def forward(self, x): + x = self.conv1(x) + x = self.pointwise(x) + return x + + +class Block(nn.Module): + def __init__(self, in_filters, out_filters, reps, strides=1, start_with_relu=True, grow_first=True): + super(Block, self).__init__() + + if out_filters != in_filters or strides != 1: + self.skip = nn.Conv2d(in_filters, out_filters, + 1, stride=strides, bias=False) + self.skipbn = nn.BatchNorm2d(out_filters) + else: + self.skip = None + + self.relu = nn.ReLU(inplace=True) + rep = [] + + filters = in_filters + if grow_first: # whether the number of filters grows first + rep.append(self.relu) + rep.append(SeparableConv2d(in_filters, out_filters, + 3, stride=1, padding=1, bias=False)) + rep.append(nn.BatchNorm2d(out_filters)) + filters = out_filters + + for i in range(reps-1): + rep.append(self.relu) + rep.append(SeparableConv2d(filters, filters, + 3, stride=1, padding=1, bias=False)) + rep.append(nn.BatchNorm2d(filters)) + + if not grow_first: + rep.append(self.relu) + rep.append(SeparableConv2d(in_filters, 
out_filters, + 3, stride=1, padding=1, bias=False)) + rep.append(nn.BatchNorm2d(out_filters)) + + if not start_with_relu: + rep = rep[1:] + else: + rep[0] = nn.ReLU(inplace=False) + + if strides != 1: + rep.append(nn.MaxPool2d(3, strides, 1)) + self.rep = nn.Sequential(*rep) + + def forward(self, inp): + x = self.rep(inp) + + if self.skip is not None: + skip = self.skip(inp) + skip = self.skipbn(skip) + else: + skip = inp + + x += skip + return x + +def add_gaussian_noise(ins, mean=0, stddev=0.2): + noise = ins.data.new(ins.size()).normal_(mean, stddev) + return ins + noise + + +@BACKBONE.register_module(module_name="xception") +class Xception(nn.Module): + """ + Xception optimized for the ImageNet dataset, as specified in + https://arxiv.org/pdf/1610.02357.pdf + """ + + def __init__(self, xception_config): + """ Constructor + Args: + xception_config: configuration file with the dict format + """ + super(Xception, self).__init__() + self.num_classes = xception_config["num_classes"] + self.mode = xception_config["mode"] + inc = xception_config["inc"] + dropout = xception_config["dropout"] + + # Entry flow + self.conv1 = nn.Conv2d(inc, 32, 3, 2, 0, bias=False) + + self.bn1 = nn.BatchNorm2d(32) + self.relu = nn.ReLU(inplace=True) + + self.conv2 = nn.Conv2d(32, 64, 3, bias=False) + self.bn2 = nn.BatchNorm2d(64) + # do relu here + + self.block1 = Block( + 64, 128, 2, 2, start_with_relu=False, grow_first=True) + self.block2 = Block( + 128, 256, 2, 2, start_with_relu=True, grow_first=True) + self.block3 = Block( + 256, 728, 2, 2, start_with_relu=True, grow_first=True) + + # middle flow + self.block4 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + self.block5 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + self.block6 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + self.block7 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + + self.block8 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + self.block9 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + self.block10 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + self.block11 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + + # Exit flow + self.block12 = Block( + 728, 1024, 2, 2, start_with_relu=True, grow_first=False) + + self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1) + self.bn3 = nn.BatchNorm2d(1536) + + # do relu here + self.conv4 = SeparableConv2d(1536, 2048, 3, 1, 1) + self.bn4 = nn.BatchNorm2d(2048) + # used for iid + final_channel = 2048 + if self.mode == 'adjust_channel_iid': + final_channel = 512 + self.mode = 'adjust_channel' + self.last_linear = nn.Linear(final_channel, self.num_classes) + if dropout: + self.last_linear = nn.Sequential( + nn.Dropout(p=dropout), + nn.Linear(final_channel, self.num_classes) + ) + + self.adjust_channel = nn.Sequential( + nn.Conv2d(2048, 512, 1, 1), + nn.BatchNorm2d(512), + nn.ReLU(inplace=False), + ) + + def fea_part1_0(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + + return x + + def fea_part1_1(self, x): + + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + + return x + + def fea_part1(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + + return x + + def fea_part2(self, x): + x = self.block1(x) + x = self.block2(x) + x = self.block3(x) + + return x + + def fea_part3(self, x): + if self.mode == "shallow_xception": + return x + else: + x = 
self.block4(x) + x = self.block5(x) + x = self.block6(x) + x = self.block7(x) + return x + + def fea_part4(self, x): + if self.mode == "shallow_xception": + x = self.block12(x) + else: + x = self.block8(x) + x = self.block9(x) + x = self.block10(x) + x = self.block11(x) + x = self.block12(x) + return x + + def fea_part5(self, x): + x = self.conv3(x) + x = self.bn3(x) + x = self.relu(x) + + x = self.conv4(x) + x = self.bn4(x) + + return x + + def features(self, input): + x = self.fea_part1(input) + + x = self.fea_part2(x) + x = self.fea_part3(x) + x = self.fea_part4(x) + + x = self.fea_part5(x) + + if self.mode == 'adjust_channel': + x = self.adjust_channel(x) + + return x + + def classifier(self, features,id_feat=None): + # for iid + if self.mode == 'adjust_channel': + x = features + else: + x = self.relu(features) + + if len(x.shape) == 4: + x = F.adaptive_avg_pool2d(x, (1, 1)) + x = x.view(x.size(0), -1) + self.last_emb = x + # for iid + if id_feat!=None: + out = self.last_linear(x-id_feat) + else: + out = self.last_linear(x) + return out + + def forward(self, input): + x = self.features(input) + out = self.classifier(x) + return out, x diff --git a/training/networks/xception_ffd.py b/training/networks/xception_ffd.py new file mode 100644 index 0000000000000000000000000000000000000000..5f23ddae50da43390081167dd199d31a1118c889 --- /dev/null +++ b/training/networks/xception_ffd.py @@ -0,0 +1,267 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +import os +import sys + +class SeparableConv2d(nn.Module): + def __init__(self, c_in, c_out, ks, stride=1, padding=0, dilation=1, bias=False): + super(SeparableConv2d, self).__init__() + self.c = nn.Conv2d(c_in, c_in, ks, stride, padding, dilation, groups=c_in, bias=bias) + self.pointwise = nn.Conv2d(c_in, c_out, 1, 1, 0, 1, 1, bias=bias) + + def forward(self, x): + x = self.c(x) + x = self.pointwise(x) + return x + +class Block(nn.Module): + def __init__(self, c_in, c_out, reps, stride=1, start_with_relu=True, grow_first=True): + super(Block, self).__init__() + + self.skip = None + self.skip_bn = None + if c_out != c_in or stride!= 1: + self.skip = nn.Conv2d(c_in, c_out, 1, stride=stride, bias=False) + self.skip_bn = nn.BatchNorm2d(c_out) + + self.relu = nn.ReLU(inplace=True) + + rep = [] + c = c_in + if grow_first: + rep.append(self.relu) + rep.append(SeparableConv2d(c_in, c_out, 3, stride=1, padding=1, bias=False)) + rep.append(nn.BatchNorm2d(c_out)) + c = c_out + + for i in range(reps - 1): + rep.append(self.relu) + rep.append(SeparableConv2d(c, c, 3, stride=1, padding=1, bias=False)) + rep.append(nn.BatchNorm2d(c)) + + if not grow_first: + rep.append(self.relu) + rep.append(SeparableConv2d(c_in, c_out, 3, stride=1, padding=1, bias=False)) + rep.append(nn.BatchNorm2d(c_out)) + + if not start_with_relu: + rep = rep[1:] + else: + rep[0] = nn.ReLU(inplace=False) + + if stride != 1: + rep.append(nn.MaxPool2d(3, stride, 1)) + self.rep = nn.Sequential(*rep) + + def forward(self, inp): + x = self.rep(inp) + + if self.skip is not None: + y = self.skip(inp) + y = self.skip_bn(y) + else: + y = inp + + x += y + return x + +class RegressionMap(nn.Module): + def __init__(self, c_in): + super(RegressionMap, self).__init__() + self.c = SeparableConv2d(c_in, 1, 3, stride=1, padding=1, bias=False) + self.s = nn.Sigmoid() + + def forward(self, x): + mask = self.c(x) + mask = self.s(mask) + return mask, None + +class TemplateMap(nn.Module): + def __init__(self, c_in, templates): + super(TemplateMap, self).__init__() + self.c = Block(c_in, 
364, 2, 2, start_with_relu=True, grow_first=False) + self.l = nn.Linear(364, 10) + self.relu = nn.ReLU(inplace=True) + + self.templates = templates + + def forward(self, x): + v = self.c(x) + v = self.relu(v) + v = F.adaptive_avg_pool2d(v, (1,1)) + v = v.view(v.size(0), -1) + v = self.l(v) + mask = torch.mm(v, self.templates.reshape(10,361)) + mask = mask.reshape(x.shape[0], 1, 19, 19) + + return mask, v + +class PCATemplateMap(nn.Module): + def __init__(self, templates): + super(PCATemplateMap, self).__init__() + self.templates = templates + + def forward(self, x): + fe = x.view(x.shape[0], x.shape[1], x.shape[2]*x.shape[3]) + fe = torch.transpose(fe, 1, 2) + mu = torch.mean(fe, 2, keepdim=True) + fea_diff = fe - mu + + cov_fea = torch.bmm(fea_diff, torch.transpose(fea_diff, 1, 2)) + B = self.templates.reshape(1, 10, 361).repeat(x.shape[0], 1, 1) + D = torch.bmm(torch.bmm(B, cov_fea), torch.transpose(B, 1, 2)) + eigen_value, eigen_vector = D.symeig(eigenvectors=True) + index = torch.tensor([9]).cuda() + eigen = torch.index_select(eigen_vector, 2, index) + + v = eigen.squeeze(-1) + mask = torch.mm(v, self.templates.reshape(10, 361)) + mask = mask.reshape(x.shape[0], 1, 19, 19) + return mask, v + +class Xception(nn.Module): + """ + Xception optimized for the ImageNet dataset, as specified in + https://arxiv.org/pdf/1610.02357.pdf + """ + def __init__(self, maptype, templates, num_classes=1000): + super(Xception, self).__init__() + self.num_classes = num_classes + + self.conv1 = nn.Conv2d(3, 32, 3,2, 0, bias=False) + self.bn1 = nn.BatchNorm2d(32) + self.relu = nn.ReLU(inplace=True) + + self.conv2 = nn.Conv2d(32,64,3,bias=False) + self.bn2 = nn.BatchNorm2d(64) + + self.block1=Block(64,128,2,2,start_with_relu=False,grow_first=True) + self.block2=Block(128,256,2,2,start_with_relu=True,grow_first=True) + self.block3=Block(256,728,2,2,start_with_relu=True,grow_first=True) + self.block4=Block(728,728,3,1,start_with_relu=True,grow_first=True) + self.block5=Block(728,728,3,1,start_with_relu=True,grow_first=True) + self.block6=Block(728,728,3,1,start_with_relu=True,grow_first=True) + self.block7=Block(728,728,3,1,start_with_relu=True,grow_first=True) + self.block8=Block(728,728,3,1,start_with_relu=True,grow_first=True) + self.block9=Block(728,728,3,1,start_with_relu=True,grow_first=True) + self.block10=Block(728,728,3,1,start_with_relu=True,grow_first=True) + self.block11=Block(728,728,3,1,start_with_relu=True,grow_first=True) + self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False) + + self.conv3 = SeparableConv2d(1024,1536,3,1,1) + self.bn3 = nn.BatchNorm2d(1536) + + self.conv4 = SeparableConv2d(1536,2048,3,1,1) + self.bn4 = nn.BatchNorm2d(2048) + + self.last_linear = nn.Linear(2048, num_classes) + + if maptype == 'none': + self.map = [1, None] + elif maptype == 'reg': + self.map = RegressionMap(728) + elif maptype == 'tmp': + self.map = TemplateMap(728, templates) + elif maptype == 'pca_tmp': + self.map = PCATemplateMap(728) + else: + print('Unknown map type: `{0}`'.format(maptype)) + sys.exit() + + def features(self, input): + x = self.conv1(input) + x = self.bn1(x) + x = self.relu(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + + x = self.block1(x) + x = self.block2(x) + x = self.block3(x) + x = self.block4(x) + x = self.block5(x) + x = self.block6(x) + x = self.block7(x) + mask, vec = self.map(x) + x = x * mask + x = self.block8(x) + x = self.block9(x) + x = self.block10(x) + x = self.block11(x) + x = self.block12(x) + x = self.conv3(x) + x = self.bn3(x) + x = 
self.relu(x) + + x = self.conv4(x) + x = self.bn4(x) + return x, mask, vec + + def logits(self, features): + x = self.relu(features) + x = F.adaptive_avg_pool2d(x, (1, 1)) + x = x.view(x.size(0), -1) + x = self.last_linear(x) + return x + + def forward(self, input): + x, mask, vec = self.features(input) + x = self.logits(x) + return x, mask, vec + +def init_weights(m): + classname = m.__class__.__name__ + if classname.find('SeparableConv2d') != -1: + m.c.weight.data.normal_(0.0, 0.01) + if m.c.bias is not None: + m.c.bias.data.fill_(0) + m.pointwise.weight.data.normal_(0.0, 0.01) + if m.pointwise.bias is not None: + m.pointwise.bias.data.fill_(0) + elif classname.find('Conv') != -1 or classname.find('Linear') != -1: + m.weight.data.normal_(0.0, 0.01) + if m.bias is not None: + m.bias.data.fill_(0) + elif classname.find('BatchNorm') != -1: + m.weight.data.normal_(1.0, 0.01) + m.bias.data.fill_(0) + elif classname.find('LSTM') != -1: + for i in m._parameters: + if i.__class__.__name__.find('weight') != -1: + i.data.normal_(0.0, 0.01) + elif i.__class__.__name__.find('bias') != -1: + i.bias.data.fill_(0) + +class Model: + def __init__(self, maptype='None', templates=None, num_classes=2, load_pretrain=True): + model = Xception(maptype, templates, num_classes=num_classes) + if load_pretrain: + state_dict = torch.load('./xception-b5690688.pth') + for name, weights in state_dict: + if 'pointwise' in name: + state_dict[name] = weights.unsqueeze(-1).unsqueeze(-1) + del state_dict['fc.weight'] + del state_dict['fc.bias'] + model.load_state_dict(state_dict, False) + else: + model.apply(init_weights) + self.model = model + + def save(self, epoch, optim, model_dir): + state = {'net': self.model.state_dict(), 'optim': optim.state_dict()} + torch.save(state, '{0}/{1:06d}.tar'.format(model_dir, epoch)) + print('Saved model `{0}`'.format(epoch)) + + def load(self, epoch, model_dir): + filename = '{0}{1:06d}.tar'.format(model_dir, epoch) + print('Loading model from {0}'.format(filename)) + if os.path.exists(filename): + state = torch.load(filename) + self.model.load_state_dict(state['net']) + else: + print('Failed to load model from {0}'.format(filename)) + diff --git a/training/networks/xception_sladd.py b/training/networks/xception_sladd.py new file mode 100644 index 0000000000000000000000000000000000000000..b5b949d7918fab93983d915857ea6f60a22c9927 --- /dev/null +++ b/training/networks/xception_sladd.py @@ -0,0 +1,272 @@ +""" + +Author: Andreas Rössler +""" +import torchvision +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.model_zoo as model_zoo + +from metrics.registry import BACKBONE + +pretrained_settings = { + 'xception': { + 'imagenet': { + 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/xception-b5690688.pth', + 'input_space': 'RGB', + 'input_size': [3, 299, 299], + 'input_range': [0, 1], + 'mean': [0.5, 0.5, 0.5], + 'std': [0.5, 0.5, 0.5], + 'num_classes': 1000, + 'scale': 0.8975 + # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 + } + } +} + + +class SeparableConv2d(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False): + super(SeparableConv2d, self).__init__() + + self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, + stride, padding, dilation, groups=in_channels, bias=bias) + self.pointwise = nn.Conv2d( + in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias) + + def forward(self, x): + x = self.conv1(x) + x = 
self.pointwise(x) + return x + + +class RegressionMap(nn.Module): + def __init__(self, c_in): + super(RegressionMap, self).__init__() + self.c = SeparableConv2d(c_in, 1, 3, stride=1, padding=1, bias=False) + self.s = nn.Sigmoid() + + def forward(self, x): + mask = self.c(x) + mask = self.s(mask) + return mask + + +class Block(nn.Module): + def __init__(self, in_filters, out_filters, reps, strides=1, start_with_relu=True, grow_first=True): + super(Block, self).__init__() + + if out_filters != in_filters or strides != 1: + self.skip = nn.Conv2d(in_filters, out_filters, + 1, stride=strides, bias=False) + self.skipbn = nn.BatchNorm2d(out_filters) + else: + self.skip = None + + self.relu = nn.ReLU(inplace=False) + rep = [] + + filters = in_filters + if grow_first: # whether the number of filters grows first + rep.append(self.relu) + rep.append(SeparableConv2d(in_filters, out_filters, + 3, stride=1, padding=1, bias=False)) + rep.append(nn.BatchNorm2d(out_filters)) + filters = out_filters + + for i in range(reps - 1): + rep.append(self.relu) + rep.append(SeparableConv2d(filters, filters, + 3, stride=1, padding=1, bias=False)) + rep.append(nn.BatchNorm2d(filters)) + + if not grow_first: + rep.append(self.relu) + rep.append(SeparableConv2d(in_filters, out_filters, + 3, stride=1, padding=1, bias=False)) + rep.append(nn.BatchNorm2d(out_filters)) + + if not start_with_relu: + rep = rep[1:] + else: + rep[0] = nn.ReLU(inplace=False) + + if strides != 1: + rep.append(nn.MaxPool2d(3, strides, 1)) + self.rep = nn.Sequential(*rep) + + def forward(self, inp): + x = self.rep(inp) + + if self.skip is not None: + skip = self.skip(inp) + skip = self.skipbn(skip) + else: + skip = inp + + x += skip + return x + +@BACKBONE.register_module(module_name="xception_sladd") +class Xception_SLADD(nn.Module): + """ + Xception optimized for the ImageNet dataset, as specified in + https://arxiv.org/pdf/1610.02357.pdf + """ + + def __init__(self, config): + """ Constructor + Args: + num_classes: number of classes + """ + super(Xception_SLADD, self).__init__() + num_classes = config["num_classes"] + inc = config["inc"] + dropout = config["dropout"] + + # Entry flow + self.conv1 = nn.Conv2d(inc, 32, 3, 2, 0, bias=False) + self.bn1 = nn.BatchNorm2d(32) + self.relu = nn.ReLU(inplace=False) + + self.conv2 = nn.Conv2d(32, 64, 3, bias=False) + self.bn2 = nn.BatchNorm2d(64) + # do relu here + + self.block1 = Block( + 64, 128, 2, 2, start_with_relu=False, grow_first=True) + self.block2 = Block( + 128, 256, 2, 2, start_with_relu=True, grow_first=True) + self.block3 = Block( + 256, 728, 2, 2, start_with_relu=True, grow_first=True) + + # middle flow + self.block4 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + self.block5 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + self.block6 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + self.block7 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + + self.block8 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + self.block9 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + self.block10 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + self.block11 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True) + + # Exit flow + self.block12 = Block( + 728, 1024, 2, 2, start_with_relu=True, grow_first=False) + + self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1) + self.bn3 = nn.BatchNorm2d(1536) + + # do relu here + self.conv4 = SeparableConv2d(1536, 2048, 3, 1, 1) + 
self.bn4 = nn.BatchNorm2d(2048) + final_channel = 2048 + self.last_linear = nn.Linear(final_channel, num_classes) + if dropout: + self.last_linear = nn.Sequential( + nn.Dropout(p=dropout), + nn.Linear(final_channel, num_classes) + ) + self.type_fc = nn.Linear(2048, 5) + self.mag_fc = nn.Linear(2048, 1) + self.map = RegressionMap(728) + self.pecent = 1.0 / 1.5 + + def fea_part1_0(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + + return x + + def fea_part1_1(self, x): + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + + return x + + def fea_part1(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + + return x + + def fea_part2(self, x): + x = self.block1(x) + x = self.block2(x) + x = self.block3(x) + + return x + + def fea_part3(self, x): + x = self.block4(x) + x = self.block5(x) + x = self.block6(x) + x = self.block7(x) + + return x + + def fea_part4(self, x): + x = self.block8(x) + x = self.block9(x) + x = self.block10(x) + x = self.block11(x) + x = self.block12(x) + + return x + + def fea_part5(self, x): + x = self.conv3(x) + x = self.bn3(x) + x = self.relu(x) + + x = self.conv4(x) + x = self.bn4(x) + + return x + + def features(self, input): + x = self.fea_part1(input) + + x = self.fea_part2(x) + x3 = self.fea_part3(x) + x = self.fea_part4(x3) + + x = self.fea_part5(x) + return x,x3 + + # def classifier(self, features): + def classifier(self, x): + x = self.relu(x) + x = F.adaptive_avg_pool2d(x, (1, 1)) + x = x.view(x.size(0), -1) + out = self.last_linear(x) + return out, x + + def estimateMap(self, x): + map = self.map(x) + return map + + # def forward(self, input): + def forward(self, x): + x,x3=self.features(x) + out, fea, type, mag = self.classifier(x) + map = self.estimateMap(x3) + return out, fea, map, type, mag diff --git a/training/optimizor/LinearLR.py b/training/optimizor/LinearLR.py new file mode 100644 index 0000000000000000000000000000000000000000..80bc70dbae46bb9f76aa65afe6f4a1b95dd25619 --- /dev/null +++ b/training/optimizor/LinearLR.py @@ -0,0 +1,20 @@ +import torch +from torch.optim import SGD +from torch.optim.lr_scheduler import _LRScheduler + +class LinearDecayLR(_LRScheduler): + def __init__(self, optimizer, n_epoch, start_decay, last_epoch=-1): + self.start_decay=start_decay + self.n_epoch=n_epoch + super(LinearDecayLR, self).__init__(optimizer, last_epoch) + + def get_lr(self): + last_epoch = self.last_epoch + n_epoch=self.n_epoch + b_lr=self.base_lrs[0] + start_decay=self.start_decay + if last_epoch>start_decay: + lr=b_lr-b_lr/(n_epoch-start_decay)*(last_epoch-start_decay) + else: + lr=b_lr + return [lr] \ No newline at end of file diff --git a/training/optimizor/SAM.py b/training/optimizor/SAM.py new file mode 100644 index 0000000000000000000000000000000000000000..7b8d1dc52726ffea22553ce96a6e0d37a902fbff --- /dev/null +++ b/training/optimizor/SAM.py @@ -0,0 +1,77 @@ +# borrowed from + +import torch + +import torch +import torch.nn as nn + +def disable_running_stats(model): + def _disable(module): + if isinstance(module, nn.BatchNorm2d): + module.backup_momentum = module.momentum + module.momentum = 0 + + model.apply(_disable) + +def enable_running_stats(model): + def _enable(module): + if isinstance(module, nn.BatchNorm2d) and hasattr(module, "backup_momentum"): + module.momentum = module.backup_momentum + + model.apply(_enable) + +class SAM(torch.optim.Optimizer): + def __init__(self, params, base_optimizer, rho=0.05, **kwargs): + assert rho >= 0.0, f"Invalid 
rho, should be non-negative: {rho}" + + defaults = dict(rho=rho, **kwargs) + super(SAM, self).__init__(params, defaults) + + self.base_optimizer = base_optimizer(self.param_groups, **kwargs) + self.param_groups = self.base_optimizer.param_groups + + @torch.no_grad() + def first_step(self, zero_grad=False): + grad_norm = self._grad_norm() + for group in self.param_groups: + scale = group["rho"] / (grad_norm + 1e-12) + + for p in group["params"]: + if p.grad is None: continue + e_w = p.grad * scale.to(p) + p.add_(e_w) # climb to the local maximum "w + e(w)" + self.state[p]["e_w"] = e_w + + if zero_grad: self.zero_grad() + + @torch.no_grad() + def second_step(self, zero_grad=False): + for group in self.param_groups: + for p in group["params"]: + if p.grad is None: continue + p.sub_(self.state[p]["e_w"]) # get back to "w" from "w + e(w)" + + self.base_optimizer.step() # do the actual "sharpness-aware" update + + if zero_grad: self.zero_grad() + + @torch.no_grad() + def step(self, closure=None): + assert closure is not None, "Sharpness Aware Minimization requires closure, but it was not provided" + closure = torch.enable_grad()(closure) # the closure should do a full forward-backward pass + + self.first_step(zero_grad=True) + closure() + self.second_step() + + def _grad_norm(self): + shared_device = self.param_groups[0]["params"][0].device # put everything on the same device, in case of model parallelism + norm = torch.norm( + torch.stack([ + p.grad.norm(p=2).to(shared_device) + for group in self.param_groups for p in group["params"] + if p.grad is not None + ]), + p=2 + ) + return norm \ No newline at end of file diff --git a/training/optimizor/__init__.py b/training/optimizor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..676145d777810e4a51bdaf59fdec4f5358aae349 --- /dev/null +++ b/training/optimizor/__init__.py @@ -0,0 +1,7 @@ +import os +import sys +current_file_path = os.path.abspath(__file__) +parent_dir = os.path.dirname(os.path.dirname(current_file_path)) +project_root_dir = os.path.dirname(parent_dir) +sys.path.append(parent_dir) +sys.path.append(project_root_dir)
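Usage notes (not part of the diff above). A minimal sketch of instantiating the registered Xception backbone from training/networks/xception.py; the config keys mirror what the constructor reads, but the concrete values (num_classes=2, mode="original", 256x256 inputs) and the assumption that the repository root is on PYTHONPATH (so that metrics.registry resolves) are illustrative, not taken from a project config file.

import torch
from training.networks.xception import Xception   # assumes repo root on sys.path

xception_config = {
    "num_classes": 2,     # real vs. fake (assumed)
    "inc": 3,             # RGB input channels
    "mode": "original",   # any value outside the special modes handled in the class
    "dropout": False,     # falsy value keeps a plain Linear classification head
}
backbone = Xception(xception_config).eval()

with torch.no_grad():
    logits, feature_map = backbone(torch.randn(2, 3, 256, 256))  # forward returns (logits, features)
print(logits.shape, feature_map.shape)   # (2, 2) and (2, 2048, 8, 8)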
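A minimal sketch of pulling 512-d embeddings from the iresnet_iid backbone above (again assuming the repository root is importable). The 112x112 input size follows from fc_scale = 7 * 7 together with the four stride-2 stages; note that the final BatchNorm1d head is only applied when the batch size is greater than one.

import torch
from training.networks.iresnet_iid import iresnet100

model = iresnet100(fp16=False).eval()
faces = torch.randn(4, 3, 112, 112)       # batch of aligned face crops (assumed size)
with torch.no_grad():
    embeddings = model(faces)              # (4, 512)
print(embeddings.shape)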
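A minimal sketch of using VGGLoss from training/networks/vgg.py as a perceptual loss. Inputs are assumed to be 3-channel batches scaled to [0, 1], as the class docstring requires, and the first call downloads torchvision's pretrained VGG16 weights.

import torch
from training.networks.vgg import VGGLoss

perceptual = VGGLoss(model='vgg16', layer=8)                     # relu2_2 features
pred, target = torch.rand(2, 3, 224, 224), torch.rand(2, 3, 224, 224)
loss = perceptual(pred, target)                                  # scalar, reduction='mean'
print(loss.item())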
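A minimal sketch of LinearDecayLR from training/optimizor/LinearLR.py: the base learning rate is held constant for start_decay epochs and then decays linearly to zero at n_epoch. The single-parameter-group optimizer is an assumption here, since get_lr() returns a one-element list.

import torch
from training.optimizor.LinearLR import LinearDecayLR

params = [torch.nn.Parameter(torch.zeros(1))]        # placeholder parameters
optimizer = torch.optim.SGD(params, lr=0.1)
scheduler = LinearDecayLR(optimizer, n_epoch=100, start_decay=50)

for epoch in range(100):
    optimizer.step()          # one epoch of training would go here
    scheduler.step()          # epoch-wise learning-rate update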
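A minimal sketch of the two-pass update that the SAM wrapper in training/optimizor/SAM.py expects. The tiny linear model, criterion and dummy batch are placeholders, and the running-stats helpers only affect models that contain BatchNorm2d layers.

import torch
import torch.nn as nn
from training.optimizor.SAM import SAM, enable_running_stats, disable_running_stats

model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 8 * 8, 2))     # placeholder network
criterion = nn.CrossEntropyLoss()
optimizer = SAM(model.parameters(), torch.optim.SGD, rho=0.05, lr=0.01, momentum=0.9)

images, labels = torch.randn(4, 3, 8, 8), torch.randint(0, 2, (4,))

# first pass: gradients at w, then climb to the perturbed point w + e(w)
enable_running_stats(model)
criterion(model(images), labels).backward()
optimizer.first_step(zero_grad=True)

# second pass: gradients at w + e(w), undo the perturbation and apply the SGD step
disable_running_stats(model)
criterion(model(images), labels).backward()
optimizer.second_step(zero_grad=True)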