#!/bin/bash
#SBATCH --job-name=trial
#SBATCH --gres=gpu:1
#SBATCH --partition=a6000
#SBATCH --time=0-12:00:00       # d-hh:mm:ss, job time limit
#SBATCH --mem=20000             # CPU memory size (MB)
#SBATCH --cpus-per-task=4
#SBATCH --output=./trial.log

ml purge
ml load cuda/11.8

eval "$(conda shell.bash hook)"
conda activate risall

cd /data2/projects/chaeyun/RIS-DMMI

# Make sure the output directory exists before tee writes into it.
mkdir -p ./tmp

# Only one GPU is requested (--gres=gpu:1) and one process is launched,
# so expose a single device.
CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --nproc_per_node 1 train.py \
    --model dmmi_swin --dataset refcocog --splitBy umd --model_id 1234 \
    --batch-size 2 --lr 0.00005 --wd 1e-2 \
    --window12 --swin_type base \
    --pretrained_backbone /data2/projects/chaeyun/LAVT-RIS/pretrained_weights/swin_base_patch4_window12_384_22k.pth \
    --output_dir './tmp/' --epochs 40 --img_size 480 \
    2>&1 | tee ./tmp/output.log