#SBATCH --job-name=lavt_gccm        # job name shown in the queue
#SBATCH [email protected]
#SBATCH --mail-type=BEGIN,END,FAIL
#SBATCH --partition=a100            # a6000 or a100
#SBATCH --gres=gpu:2                # number of GPUs
#SBATCH --time=7-00:00:00           # d-hh:mm:ss, maximum wall time
#SBATCH --mem=84000                 # host (CPU) memory in MB
#SBATCH --cpus-per-task=8           # number of CPU cores per task
#SBATCH --output=log_mosaic_grefcoco_unc_lavt_one.txt   # stdout log filename
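# Usage note (assumed workflow): submit with `sbatch <this_script>.sh`; SLURM reads the
# #SBATCH directives above (2 GPUs on the a100 partition, 8 CPU cores, ~84 GB host RAM).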
ml cuda/11.0                        # load the required CUDA version
eval "$(conda shell.bash hook)"     # initialize Conda in this shell
conda activate lavt                 # activate the project conda environment
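# Assumption: the `lavt` environment already provides PyTorch (built against CUDA 11.x)
# and the LAVT project dependencies; adjust the module and environment names to your cluster.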
# train
# mkdir ./models
# mkdir ./models/gref_unc
# CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch --nproc_per_node 2 --master_port 12345 train_mosaic.py --model lavt --dataset grefcoco --splitBy unc --model_id gref_unc --batch-size 14 --lr 0.00005 --wd 1e-2 --swin_type base --pretrained_swin_weights ./pretrained_weights/swin_base_patch4_window12_384_22k.pth --epochs 40 --img_size 480 2>&1 | tee ./models/gref_unc/output
mkdir -p ./models/mosaic_gref_unc_lavt_one   # -p so the run does not fail if ./models or the directory already exists
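# Distributed launch: --nproc_per_node 2 spawns one worker process per GPU, matching
# --gres=gpu:2 above; --master_port must be unique per job when several jobs share a node.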
srun python -m torch.distributed.launch --nproc_per_node 2 --master_port 13336 train_mosaic.py --model lavt_one --dataset grefcoco --splitBy unc --model_id mosaic_gref_unc_lavt_one --batch-size 14 --lr 0.00005 --wd 1e-2 --swin_type base --pretrained_swin_weights ./pretrained_weights/swin_base_patch4_window12_384_22k.pth --epochs 50 --img_size 480 2>&1 | tee ./models/mosaic_gref_unc_lavt_one/output |