# Disable W&B logging; CUDA_LAUNCH_BLOCKING=1 makes CUDA errors surface at the
# failing kernel (a debugging aid; it slows training and can be dropped).
export WANDB_MODE=disabled
export CUDA_LAUNCH_BLOCKING=1

DATASET=Instruments
BASE_MODEL=/datain/v-yinju/llama-7b
DATA_PATH=/datain/v-yinju/rqvae-zzx/data
INDEX=your_index_save_path
OUTPUT_DIR=your_ckpt_save_dir

mkdir -p $OUTPUT_DIR

# LoRA fine-tuning on 8 GPUs with fp16 ZeRO stage-2 via DeepSpeed.
# Effective global batch size: 8 GPUs x 6 per device x 2 accumulation steps = 96.
torchrun --nproc_per_node=8 lora_finetune.py \
    --base_model $BASE_MODEL \
    --output_dir $OUTPUT_DIR \
    --dataset $DATASET \
    --data_path $DATA_PATH \
    --per_device_batch_size 6 \
    --gradient_accumulation_steps 2 \
    --learning_rate 5e-5 \
    --epochs 4 \
    --weight_decay 0.01 \
    --save_and_eval_strategy epoch \
    --fp16 \
    --deepspeed ./config/ds_z2_fp16.json \
    --dataloader_num_workers 4 \
    --only_train_response \
    --tasks seqrec,item2index,index2item,fusionseqrec,itemsearch,preferenceobtain \
    --train_prompt_sample_num 1,1,1,1,1,1 \
    --train_data_sample_num 0,0,0,0,0,0 \
    --index_file $INDEX

# Convert the saved checkpoint in the background; output goes to convert/convert.log.
cd convert
nohup ./convert.sh $OUTPUT_DIR >convert.log 2>&1 &
cd ..
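
# --- Reference: DeepSpeed config ---------------------------------------------
# The launcher above points at ./config/ds_z2_fp16.json. If that file is
# missing, the snippet below bootstraps a minimal ZeRO stage-2 fp16 config.
# This is a sketch built from standard DeepSpeed / HF-Trainer keys, not the
# repo's exact file; "auto" defers each value to the command-line arguments.
# Run it before the torchrun command; the guard leaves an existing file alone.
mkdir -p ./config
[ -f ./config/ds_z2_fp16.json ] || cat > ./config/ds_z2_fp16.json <<'EOF'
{
  "train_micro_batch_size_per_gpu": "auto",
  "gradient_accumulation_steps": "auto",
  "gradient_clipping": "auto",
  "fp16": {
    "enabled": true
  },
  "zero_optimization": {
    "stage": 2,
    "overlap_comm": true,
    "contiguous_gradients": true
  }
}
EOF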
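
# --- Monitoring the conversion -----------------------------------------------
# nohup detaches convert.sh, so this script can exit while the conversion keeps
# running. To follow its output afterwards (log path set by the redirect
# above), run manually:
#
#   tail -f convert/convert.log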