File size: 1,585 Bytes
0b32e3c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 |
#!/bin/bash
# SLURM header for the DMMI RefCOCOg (google split) reproduction run.
# NOTE: #SBATCH directives must precede the first executable command;
# plain comment lines like these are allowed between them.
#SBATCH --job-name=dmmibs12-gg
#SBATCH --partition=a6000
# One GPU on the a6000 partition.
#SBATCH --gres=gpu:1
#SBATCH --time=12-00:00:00 # d-hh:mm:ss, job time limit
# Bare number is interpreted in MB by default (~32 GB) — confirm cluster default units.
#SBATCH --mem=32000 # cpu memory size
#SBATCH --cpus-per-task=6
# stdout+stderr log; ./trainlog must exist before submission or the job fails to start.
#SBATCH --output=./trainlog/dmmi_gref_google_bs12_repro.log
# ---- Environment setup --------------------------------------------------
# Critical steps get explicit `|| exit 1` guards instead of a blanket
# `set -e`: environment-modules and the conda hook are not reliably
# clean under strict mode, but a failure here must not let training
# start in a half-configured state.
ml purge                 # drop modules inherited from the submitting shell
ml load cuda/11.8 || exit 1
# Make `conda activate` available in this non-interactive batch shell.
eval "$(conda shell.bash hook)"
conda activate risall || exit 1
# Abort if the project checkout is missing rather than running torchrun
# from whatever directory SLURM started us in.
cd /data2/projects/chaeyun/RIS-DMMI || exit 1
export NCCL_P2P_DISABLE=1     # disable NCCL peer-to-peer transport on this node type
export NVIDIA_TF32_OVERRIDE=0 # force-disable TF32 for reproducible numerics
# dmmi_swin_hardpos_only
GPUS=1
# Required positional arguments:
#   $1 - output directory for checkpoints and logs (--output_dir)
#   $2 - experiment name used as the model id (--model_id)
# `${n:?}` aborts the job with a usage message if an argument is missing,
# instead of silently training with an empty output dir / model id.
OUTPUT_DIR=${1:?usage: sbatch train_repro.sh <output_dir> <exp_name>}
EXP_NAME=${2:?usage: sbatch train_repro.sh <output_dir> <exp_name>}
# Unused extra hyperparameters kept for reference:
# MARGIN=$3
# TEMP=$4
# MODE=$5
# TRAIN
# Single-process (1 GPU) launch via torchrun. --resume continues from the
# previous best checkpoint of this same experiment; remove it for a fresh run.
# All variable expansions are quoted so paths/names containing spaces or
# glob characters cannot word-split into extra arguments (SC2086).
CUDA_VISIBLE_DEVICES=0 torchrun \
  --nproc_per_node="$GPUS" --master_port=2947 train.py \
  --model dmmi_swin \
  --dataset refcocog \
  --split val \
  --splitBy google \
  --output_dir "${OUTPUT_DIR}" \
  --model_id "${EXP_NAME}" \
  --batch-size 12 \
  --lr 0.00005 \
  --wd 1e-2 \
  --window12 \
  --swin_type base \
  --pretrained_backbone /data2/projects/chaeyun/LAVT-RIS/pretrained_weights/swin_base_patch4_window12_384_22k.pth \
  --resume /data2/projects/chaeyun/RIS-DMMI/experiments/dmmi_gref_google_bs12_repro/model_best_dmmi_gref_google_bs12_repro.pth \
  --epochs 40 \
  --img_size 480
# sbatch train_repro.sh ./experiments/dmmi_refzom_bs12_repro dmmi_refzom_bs12_repro
# sbatch train_repro.sh ./experiments/dmmi_gref_google_bs12_repro dmmi_gref_google_bs12_repro
# /data2/projects/chaeyun/RIS-DMMI/experiments/dmmi_gref_google_bs12_repro/model_best_dmmi_gref_google_bs12_repro.pth
# /data2/projects/chaeyun/RIS-DMMI/experiments/dmmi_refzom_bs12_repro/model_best_dmmi_refzom_bs12_repro.pth |