#!/bin/bash
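# Single-node fine-tuning run for the LLaVA-style video model:
# DeepSpeed ZeRO-2 across 7 GPUs, Mistral-7B-Instruct-v0.1 as the language model,
# EVA-ViT-g vision tower with an unfrozen Q-Former and a pretrained mm projector,
# trained for 1 epoch on the gpt4v_adso185k dataset config.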

deepspeed --num_nodes=1 --num_gpus=7 --master_port=25001 llava/train/train_mem.py \
    --deepspeed ./scripts/zero2.json \
    --model_name_or_path mistralai/Mistral-7B-Instruct-v0.1 \
    --version v1 \
    --dataset_config /mnt/bn/algo-masp-nas-2/xiangchen/repo/LLaVA/llava/configs/adso_increasing_ablation/finetune_gpt4v_adso185k.yaml \
    --vision_tower eva-vit-g \
    --vit_model_path /mnt/bn/data-tns-algo-masp/baiyi.by/masp/model/eva_vit_g.pth \
    --qformer_model_path /mnt/bn/data-tns-algo-masp/baiyi.by/masp/model/blip2_pretrained_flant5xxl.pth \
    --pretrain_mm_mlp_adapter /mnt/bn/algo-masp-nas-2/xiangchen/model/masp_models/checkpoints/mistral-videoproj-pretrain-frames-base-intvid/mm_projector.bin \
    --freeze_qformer False \
    --mm_vision_select_layer -2 \
    --mm_use_start_end True \
    --mm_use_patch_token False \
    --image_aspect_ratio pad \
    --bf16 True \
    --output_dir /mnt/bn/algo-masp-nas-2/xiangchen/model/masp_models/checkpoints/llava-mistral_gpt4v_adso185k_unfreeze_qformer_data_sampler \
    --group_by_modality_length True \
    --num_train_epochs 1 \
    --per_device_train_batch_size 16 \
    --per_device_eval_batch_size 4 \
    --gradient_accumulation_steps 1 \
    --evaluation_strategy "no" \
    --save_strategy "steps" \
    --save_steps 2000 \
    --save_total_limit 1 \
    --learning_rate 2e-5 \
    --weight_decay 0. \
    --warmup_ratio 0.03 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --tf32 True \
    --model_max_length 4096 \
    --gradient_checkpointing True \
    --dataloader_num_workers 1 \
    --lazy_preprocess True \
    --report_to none