#!/bin/bash
# Fine-tune a SigLIP + Q-Former LLaVA variant on top of Mistral-7B-Instruct-v0.1
# with DeepSpeed ZeRO-2 on a single node with 7 GPUs.
deepspeed --num_nodes=1 --num_gpus=7 --master_port=25001 llava/train/train_mem.py \
    --deepspeed ./scripts/zero2.json \
    --model_name_or_path mistralai/Mistral-7B-Instruct-v0.1 \
    --version v1 \
    --dataset_config /mnt/bn/algo-masp-nas-2/xiangchen/repo/LLaVA/llava/configs/gpt4v_increasing_ablation/finetune_videollava.yaml \
    --vision_tower HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit \
    --qformer_model_path /mnt/bn/algo-masp-nas-2/xiangchen/model/masp_models/checkpoints/llava-pretrain-siglip-qformer_scratch_64_projector/mm_projector.bin \
    --pretrain_mm_mlp_adapter /mnt/bn/algo-masp-nas-2/xiangchen/model/masp_models/checkpoints/llava-pretrain-siglip-qformer_scratch_64_projector/mm_projector.bin \
    --freeze_qformer False \
    --mm_vision_select_layer -2 \
    --mm_use_start_end True \
    --mm_use_patch_token False \
    --image_aspect_ratio pad \
    --num_token_per_image 64 \
    --num_query_token 64 \
    --bf16 True \
    --output_dir /mnt/bn/algo-masp-nas-2/xiangchen/model/masp_models/checkpoints/llava-siglip_qformrer_from_scratch_64_llava_800k \
    --group_by_modality_length True \
    --num_train_epochs 1 \
    --per_device_train_batch_size 4 \
    --per_device_eval_batch_size 4 \
    --gradient_accumulation_steps 4 \
    --evaluation_strategy "no" \
    --save_strategy "steps" \
    --save_steps 2000 \
    --save_total_limit 1 \
    --learning_rate 1e-5 \
    --weight_decay 0. \
    --warmup_ratio 0.03 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --tf32 True \
    --model_max_length 4096 \
    --gradient_checkpointing True \
    --dataloader_num_workers 1 \
    --lazy_preprocess True \
    --report_to none
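
# Notes:
# - Effective global batch size implied by the flags above:
#   7 GPUs x 4 per-device batch x 4 gradient-accumulation steps = 112 samples per optimizer step.
# - Example launch with log capture (the script filename here is hypothetical):
#   bash scripts/finetune_siglip_qformer.sh 2>&1 | tee finetune.log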