assistant_tag: assistant
bf16: true
content_tag: value
cutoff_len: 2048
dataset: mli-lab/OHprompts_GPT4oresponses_7.5k
dataset_dir: ONLINE
ddp_timeout: 180000000
deepspeed: dcft/train/zero3.json
do_train: true
enable_liger_kernel: true
finetuning_type: full
formatting: sharegpt
global_batch_size: 96
gradient_accumulation_steps: 6
hub_model_id: mli-lab/qwen_OHprompts_GPT4oresponses_8k
learning_rate: 1.0e-05
logging_steps: 1
lr_scheduler_type: cosine
max_samples: 1000000
messages: conversations
model_name_or_path: Qwen/Qwen2.5-7B-Instruct
neat_packing: true
num_train_epochs: 3.0
output_dir: /data/horse/ws/rehe951g-p_finetuning/checkpoints/qwen_OHprompts_GPT4oresponses_8k
overwrite_cache: true
overwrite_output_dir: true
packing: true
per_device_eval_batch_size: 2
per_device_train_batch_size: 2
plot_loss: true
preprocessing_num_workers: 16
push_to_db: true
push_to_hub: true
report_to: wandb
role_tag: from
run_name: qwen_OHprompts_GPT4oresponses_8k
save_steps: 10
stage: sft
template: qwen25
user_tag: user
warmup_ratio: 0.1
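These hyperparameters follow the LLaMA-Factory SFT config schema (the `dcft/` DeepSpeed path and keys such as `push_to_db` suggest a fork of it); upstream LLaMA-Factory would launch such a config with `llamafactory-cli train <config>.yaml`, though the fork may use its own entry point. Note that `global_batch_size: 96` is consistent with `per_device_train_batch_size: 2` × `gradient_accumulation_steps: 6` across 8 training devices. Below is a minimal, hedged sketch of loading the resulting checkpoint, assuming the `hub_model_id` above was actually pushed and is public; the `qwen25` template corresponds to the chat template bundled with the Qwen2.5 tokenizer, so the standard `transformers` API suffices for inference:

```python
# Minimal sketch: load the fine-tuned checkpoint from the Hub and run one
# chat-formatted generation. Assumes the hub_model_id above is public and
# a recent `transformers` with Qwen2.5 support is installed.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "mli-lab/qwen_OHprompts_GPT4oresponses_8k"  # hub_model_id from the config

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # matches `bf16: true` in the config
    device_map="auto",
)

# The `qwen25` template maps to the tokenizer's built-in chat template.
messages = [{"role": "user", "content": "Explain what supervised fine-tuning is."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(inputs, max_new_tokens=256)
# Decode only the newly generated tokens, skipping the prompt.
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```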