jpraysz committed on
Commit 9df2023 · verified · 1 Parent(s): 57b00ae

Upload 20 files

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,58 @@
+ ---
+ library_name: peft
+ license: other
+ base_model: Qwen/Qwen2-VL-7B-Instruct
+ tags:
+ - llama-factory
+ - lora
+ - generated_from_trainer
+ model-index:
+ - name: BS_riche_lora
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # BS_riche_lora
+
+ This model is a fine-tuned version of [Qwen/Qwen2-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct) on the BsKIE3 dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 2
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 16
+ - optimizer: adamw_torch with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
+ - lr_scheduler_type: cosine
+ - num_epochs: 3.0
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.12.0
+ - Transformers 4.49.0
+ - Pytorch 2.4.0+cu121
+ - Datasets 2.21.0
+ - Tokenizers 0.21.0
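The card stops at the framework versions, so here is a minimal usage sketch for the adapter it describes, assuming the PEFT and Transformers versions listed above. The adapter repository id below is a placeholder for wherever these files are hosted; only `Qwen/Qwen2-VL-7B-Instruct` is named in the card.

```python
# Minimal sketch: attach this LoRA adapter to the Qwen2-VL-7B-Instruct base model.
# ADAPTER_ID is a placeholder -- point it at the repository containing these files.
import torch
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
from peft import PeftModel

BASE_ID = "Qwen/Qwen2-VL-7B-Instruct"   # base model named in the card
ADAPTER_ID = "path/to/BS_riche_lora"    # placeholder for this adapter repository

# Load the frozen base model in bfloat16 (the compute type used during training).
base = Qwen2VLForConditionalGeneration.from_pretrained(
    BASE_ID, torch_dtype=torch.bfloat16, device_map="auto"
)

# Wrap the base model with the LoRA weights from adapter_model.safetensors.
model = PeftModel.from_pretrained(base, ADAPTER_ID)
model.eval()

# The processor (tokenizer + image preprocessor) is shipped alongside the adapter.
processor = AutoProcessor.from_pretrained(ADAPTER_ID)
```

If a standalone checkpoint is preferred, `model.merge_and_unload()` folds the LoRA deltas into the base weights.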
adapter_config.json ADDED
@@ -0,0 +1,223 @@
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "Qwen/Qwen2-VL-7B-Instruct",
5
+ "bias": "none",
6
+ "fan_in_fan_out": false,
7
+ "inference_mode": true,
8
+ "init_lora_weights": true,
9
+ "layer_replication": null,
10
+ "layers_pattern": null,
11
+ "layers_to_transform": null,
12
+ "loftq_config": {},
13
+ "lora_alpha": 16,
14
+ "lora_dropout": 0,
15
+ "megatron_config": null,
16
+ "megatron_core": "megatron.core",
17
+ "modules_to_save": null,
18
+ "peft_type": "LORA",
19
+ "r": 8,
20
+ "rank_pattern": {},
21
+ "revision": null,
22
+ "target_modules": [
23
+ "model.layers.14.mlp.gate_proj",
24
+ "model.layers.26.self_attn.k_proj",
25
+ "model.layers.7.self_attn.q_proj",
26
+ "model.layers.22.self_attn.o_proj",
27
+ "model.layers.20.self_attn.k_proj",
28
+ "model.layers.27.mlp.down_proj",
29
+ "model.layers.2.mlp.gate_proj",
30
+ "model.layers.3.mlp.gate_proj",
31
+ "model.layers.16.self_attn.v_proj",
32
+ "model.layers.8.mlp.gate_proj",
33
+ "model.layers.14.self_attn.q_proj",
34
+ "model.layers.17.mlp.gate_proj",
35
+ "model.layers.11.mlp.up_proj",
36
+ "model.layers.9.self_attn.k_proj",
37
+ "model.layers.21.self_attn.k_proj",
38
+ "model.layers.13.mlp.up_proj",
39
+ "model.layers.11.self_attn.o_proj",
40
+ "model.layers.19.self_attn.o_proj",
41
+ "model.layers.14.mlp.up_proj",
42
+ "model.layers.1.mlp.up_proj",
43
+ "model.layers.5.mlp.gate_proj",
44
+ "model.layers.27.self_attn.v_proj",
45
+ "model.layers.1.self_attn.k_proj",
46
+ "model.layers.21.mlp.up_proj",
47
+ "model.layers.25.mlp.down_proj",
48
+ "model.layers.27.mlp.gate_proj",
49
+ "model.layers.6.self_attn.k_proj",
50
+ "model.layers.14.self_attn.v_proj",
51
+ "model.layers.7.self_attn.k_proj",
52
+ "model.layers.2.self_attn.q_proj",
53
+ "model.layers.23.mlp.up_proj",
54
+ "model.layers.24.mlp.up_proj",
55
+ "model.layers.4.mlp.up_proj",
56
+ "model.layers.2.mlp.up_proj",
57
+ "model.layers.2.mlp.down_proj",
58
+ "model.layers.15.self_attn.o_proj",
59
+ "model.layers.12.self_attn.v_proj",
60
+ "model.layers.10.mlp.up_proj",
61
+ "model.layers.26.mlp.down_proj",
62
+ "model.layers.17.self_attn.k_proj",
63
+ "model.layers.24.self_attn.v_proj",
64
+ "model.layers.26.self_attn.v_proj",
65
+ "model.layers.0.self_attn.o_proj",
66
+ "model.layers.10.self_attn.q_proj",
67
+ "model.layers.11.self_attn.k_proj",
68
+ "model.layers.14.self_attn.k_proj",
69
+ "model.layers.4.mlp.down_proj",
70
+ "model.layers.12.self_attn.o_proj",
71
+ "model.layers.17.self_attn.q_proj",
72
+ "model.layers.27.mlp.up_proj",
73
+ "model.layers.8.self_attn.o_proj",
74
+ "model.layers.6.self_attn.v_proj",
75
+ "model.layers.5.mlp.up_proj",
76
+ "model.layers.19.self_attn.v_proj",
77
+ "model.layers.6.mlp.down_proj",
78
+ "model.layers.22.mlp.up_proj",
79
+ "model.layers.18.mlp.up_proj",
80
+ "model.layers.0.self_attn.q_proj",
81
+ "model.layers.1.self_attn.o_proj",
82
+ "model.layers.13.self_attn.q_proj",
83
+ "model.layers.14.mlp.down_proj",
84
+ "model.layers.20.mlp.up_proj",
85
+ "model.layers.16.self_attn.k_proj",
86
+ "model.layers.25.self_attn.o_proj",
87
+ "model.layers.22.mlp.down_proj",
88
+ "model.layers.18.self_attn.q_proj",
89
+ "model.layers.12.self_attn.q_proj",
90
+ "model.layers.5.mlp.down_proj",
91
+ "model.layers.17.self_attn.v_proj",
92
+ "model.layers.5.self_attn.q_proj",
93
+ "model.layers.13.self_attn.v_proj",
94
+ "model.layers.15.self_attn.q_proj",
95
+ "model.layers.22.self_attn.v_proj",
96
+ "model.layers.26.self_attn.o_proj",
97
+ "model.layers.14.self_attn.o_proj",
98
+ "model.layers.23.self_attn.k_proj",
99
+ "model.layers.1.self_attn.q_proj",
100
+ "model.layers.8.self_attn.k_proj",
101
+ "model.layers.9.self_attn.o_proj",
102
+ "model.layers.12.mlp.up_proj",
103
+ "model.layers.25.mlp.up_proj",
104
+ "model.layers.27.self_attn.o_proj",
105
+ "model.layers.9.mlp.up_proj",
106
+ "model.layers.23.self_attn.q_proj",
107
+ "model.layers.21.self_attn.v_proj",
108
+ "model.layers.20.self_attn.v_proj",
109
+ "model.layers.23.mlp.down_proj",
110
+ "model.layers.27.self_attn.k_proj",
111
+ "model.layers.7.mlp.down_proj",
112
+ "model.layers.11.mlp.gate_proj",
113
+ "model.layers.10.self_attn.o_proj",
114
+ "model.layers.19.mlp.gate_proj",
115
+ "model.layers.0.mlp.gate_proj",
116
+ "model.layers.20.self_attn.o_proj",
117
+ "model.layers.8.mlp.up_proj",
118
+ "model.layers.11.self_attn.v_proj",
119
+ "model.layers.7.mlp.up_proj",
120
+ "model.layers.3.self_attn.v_proj",
121
+ "model.layers.19.mlp.up_proj",
122
+ "model.layers.16.mlp.up_proj",
123
+ "model.layers.4.self_attn.q_proj",
124
+ "model.layers.21.mlp.gate_proj",
125
+ "model.layers.24.mlp.down_proj",
126
+ "model.layers.13.self_attn.k_proj",
127
+ "model.layers.3.self_attn.o_proj",
128
+ "model.layers.11.mlp.down_proj",
129
+ "model.layers.3.mlp.up_proj",
130
+ "model.layers.15.mlp.up_proj",
131
+ "model.layers.10.mlp.down_proj",
132
+ "model.layers.22.mlp.gate_proj",
133
+ "model.layers.18.self_attn.v_proj",
134
+ "model.layers.16.self_attn.q_proj",
135
+ "model.layers.17.mlp.up_proj",
136
+ "model.layers.4.self_attn.o_proj",
137
+ "model.layers.9.mlp.down_proj",
138
+ "model.layers.4.mlp.gate_proj",
139
+ "model.layers.5.self_attn.v_proj",
140
+ "model.layers.18.mlp.gate_proj",
141
+ "model.layers.13.mlp.down_proj",
142
+ "model.layers.8.self_attn.v_proj",
143
+ "model.layers.11.self_attn.q_proj",
144
+ "model.layers.26.self_attn.q_proj",
145
+ "model.layers.3.self_attn.k_proj",
146
+ "model.layers.16.mlp.gate_proj",
147
+ "model.layers.1.mlp.gate_proj",
148
+ "model.layers.20.mlp.gate_proj",
149
+ "model.layers.3.self_attn.q_proj",
150
+ "model.layers.2.self_attn.v_proj",
151
+ "model.layers.6.self_attn.q_proj",
152
+ "model.layers.0.mlp.up_proj",
153
+ "model.layers.15.self_attn.k_proj",
154
+ "model.layers.26.mlp.up_proj",
155
+ "model.layers.4.self_attn.v_proj",
156
+ "model.layers.2.self_attn.k_proj",
157
+ "model.layers.6.mlp.gate_proj",
158
+ "model.layers.10.self_attn.v_proj",
159
+ "model.layers.0.self_attn.v_proj",
160
+ "model.layers.9.mlp.gate_proj",
161
+ "model.layers.16.mlp.down_proj",
162
+ "model.layers.25.mlp.gate_proj",
163
+ "model.layers.12.self_attn.k_proj",
164
+ "model.layers.13.self_attn.o_proj",
165
+ "model.layers.26.mlp.gate_proj",
166
+ "model.layers.19.mlp.down_proj",
167
+ "model.layers.18.self_attn.o_proj",
168
+ "model.layers.21.mlp.down_proj",
169
+ "model.layers.9.self_attn.v_proj",
170
+ "model.layers.20.self_attn.q_proj",
171
+ "model.layers.23.mlp.gate_proj",
172
+ "model.layers.7.self_attn.v_proj",
173
+ "model.layers.5.self_attn.k_proj",
174
+ "model.layers.2.self_attn.o_proj",
175
+ "model.layers.23.self_attn.o_proj",
176
+ "model.layers.7.mlp.gate_proj",
177
+ "model.layers.12.mlp.down_proj",
178
+ "model.layers.19.self_attn.k_proj",
179
+ "model.layers.27.self_attn.q_proj",
180
+ "model.layers.10.mlp.gate_proj",
181
+ "model.layers.22.self_attn.k_proj",
182
+ "model.layers.0.mlp.down_proj",
183
+ "model.layers.6.self_attn.o_proj",
184
+ "model.layers.8.self_attn.q_proj",
185
+ "model.layers.19.self_attn.q_proj",
186
+ "model.layers.12.mlp.gate_proj",
187
+ "model.layers.6.mlp.up_proj",
188
+ "model.layers.7.self_attn.o_proj",
189
+ "model.layers.17.self_attn.o_proj",
190
+ "model.layers.5.self_attn.o_proj",
191
+ "model.layers.4.self_attn.k_proj",
192
+ "model.layers.10.self_attn.k_proj",
193
+ "model.layers.15.mlp.down_proj",
194
+ "model.layers.20.mlp.down_proj",
195
+ "model.layers.1.self_attn.v_proj",
196
+ "model.layers.15.mlp.gate_proj",
197
+ "model.layers.18.self_attn.k_proj",
198
+ "model.layers.18.mlp.down_proj",
199
+ "model.layers.15.self_attn.v_proj",
200
+ "model.layers.17.mlp.down_proj",
201
+ "model.layers.0.self_attn.k_proj",
202
+ "model.layers.25.self_attn.k_proj",
203
+ "model.layers.13.mlp.gate_proj",
204
+ "model.layers.24.self_attn.q_proj",
205
+ "model.layers.16.self_attn.o_proj",
206
+ "model.layers.24.self_attn.k_proj",
207
+ "model.layers.23.self_attn.v_proj",
208
+ "model.layers.25.self_attn.q_proj",
209
+ "model.layers.25.self_attn.v_proj",
210
+ "model.layers.22.self_attn.q_proj",
211
+ "model.layers.1.mlp.down_proj",
212
+ "model.layers.9.self_attn.q_proj",
213
+ "model.layers.24.self_attn.o_proj",
214
+ "model.layers.8.mlp.down_proj",
215
+ "model.layers.24.mlp.gate_proj",
216
+ "model.layers.21.self_attn.q_proj",
217
+ "model.layers.3.mlp.down_proj",
218
+ "model.layers.21.self_attn.o_proj"
219
+ ],
220
+ "task_type": "CAUSAL_LM",
221
+ "use_dora": false,
222
+ "use_rslora": false
223
+ }
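The configuration above amounts to a rank-8, alpha-16 LoRA over every attention and MLP projection of the 28 language-model layers (196 target modules in total), with the vision tower left untouched. As a rough PEFT equivalent, the same thing could be expressed with suffix matching instead of the exhaustive per-layer list; this is a sketch, not the file that produced the adapter:

```python
# Sketch of a LoraConfig roughly equivalent to adapter_config.json above.
# PEFT matches bare suffixes against module names, so listing the seven
# projection types should cover the same 28 x 7 = 196 language-model modules
# (the vision blocks use different module names and are not matched).
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,                    # "r": 8
    lora_alpha=16,          # "lora_alpha": 16
    lora_dropout=0.0,       # "lora_dropout": 0
    bias="none",            # "bias": "none"
    task_type="CAUSAL_LM",  # "task_type": "CAUSAL_LM"
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",  # self-attention projections
        "gate_proj", "up_proj", "down_proj",     # MLP projections
    ],
)
```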
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2870c56632dfa7eb666926a3b41839b7e17e75a7da0a77fdb8425561928d6bbc
+ size 80792096
added_tokens.json ADDED
@@ -0,0 +1,16 @@
+ {
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 2.9904761904761905,
+ "num_input_tokens_seen": 8039616,
+ "total_flos": 3.746411207808123e+17,
+ "train_loss": 0.05875852833955716,
+ "train_runtime": 3746.4527,
+ "train_samples_per_second": 0.841,
+ "train_steps_per_second": 0.052
+ }
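As a sanity check, the throughput figures above follow directly from the run described in running_log.txt (1,050 examples, 195 optimization steps, 3 epochs); a small arithmetic sketch:

```python
# Sketch: how the summary numbers above relate to each other.
# 1,050 training examples and 195 optimization steps come from running_log.txt.
num_examples = 1050
num_epochs = 3
total_steps = 195
train_runtime = 3746.4527          # seconds
num_input_tokens_seen = 8039616

print(num_examples * num_epochs / train_runtime)  # ~0.841 -> train_samples_per_second
print(total_steps / train_runtime)                # ~0.052 -> train_steps_per_second
print(num_input_tokens_seen / train_runtime)      # ~2146 tokens/s, the final "throughput" in the log
```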
chat_template.json ADDED
@@ -0,0 +1,3 @@
+ {
+ "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}"
+ }
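This template is what the processor renders around each conversation turn; a minimal sketch of applying it (the message content here is illustrative, and the processor can equally be loaded from this repository):

```python
# Sketch: render the chat template above for a single image + text user turn.
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct")

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},  # expands to <|vision_start|><|image_pad|><|vision_end|>
            {"type": "text", "text": "Extract the key fields from this document."},
        ],
    }
]

# Because the first message is not a system turn, the template prepends the
# default "You are a helpful assistant." system block; add_generation_prompt
# appends the trailing "<|im_start|>assistant\n" so the model starts its answer.
prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```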
llamaboard_config.yaml ADDED
@@ -0,0 +1,78 @@
+ top.booster: auto
+ top.checkpoint_path: []
+ top.finetuning_type: lora
+ top.model_name: Qwen2-VL-7B-Instruct
+ top.quantization_bit: none
+ top.quantization_method: bitsandbytes
+ top.rope_scaling: none
+ top.template: qwen2_vl
+ train.additional_target: ''
+ train.apollo_rank: 16
+ train.apollo_scale: 32
+ train.apollo_target: all
+ train.apollo_update_interval: 200
+ train.badam_mode: layer
+ train.badam_switch_interval: 50
+ train.badam_switch_mode: ascending
+ train.badam_update_ratio: 0.05
+ train.batch_size: 2
+ train.compute_type: bf16
+ train.create_new_adapter: false
+ train.cutoff_len: 34114
+ train.dataset:
+ - BsKIE3
+ train.dataset_dir: data
+ train.ds_offload: false
+ train.ds_stage: none
+ train.extra_args: '{"optim": "adamw_torch"}'
+ train.freeze_extra_modules: ''
+ train.freeze_trainable_layers: 2
+ train.freeze_trainable_modules: all
+ train.galore_rank: 16
+ train.galore_scale: 2
+ train.galore_target: all
+ train.galore_update_interval: 200
+ train.gradient_accumulation_steps: 8
+ train.learning_rate: 5e-5
+ train.logging_steps: 5
+ train.lora_alpha: 16
+ train.lora_dropout: 0
+ train.lora_rank: 8
+ train.lora_target: ''
+ train.loraplus_lr_ratio: 0
+ train.lr_scheduler_type: cosine
+ train.mask_history: false
+ train.max_grad_norm: '1.0'
+ train.max_samples: '100000'
+ train.neat_packing: false
+ train.neftune_alpha: 0
+ train.num_train_epochs: '3.0'
+ train.packing: false
+ train.ppo_score_norm: false
+ train.ppo_whiten_rewards: false
+ train.pref_beta: 0.1
+ train.pref_ftx: 0
+ train.pref_loss: sigmoid
+ train.report_to:
+ - none
+ train.resize_vocab: false
+ train.reward_model: null
+ train.save_steps: 100
+ train.swanlab_api_key: ''
+ train.swanlab_link: ''
+ train.swanlab_mode: cloud
+ train.swanlab_project: llamafactory
+ train.swanlab_run_name: ''
+ train.swanlab_workspace: ''
+ train.train_on_prompt: false
+ train.training_stage: Supervised Fine-Tuning
+ train.use_apollo: false
+ train.use_badam: false
+ train.use_dora: false
+ train.use_galore: false
+ train.use_llama_pro: false
+ train.use_pissa: false
+ train.use_rslora: false
+ train.use_swanlab: false
+ train.val_size: 0
+ train.warmup_steps: 0
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "do_convert_rgb": true,
+ "do_normalize": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_mean": [
+ 0.48145466,
+ 0.4578275,
+ 0.40821073
+ ],
+ "image_processor_type": "Qwen2VLImageProcessor",
+ "image_std": [
+ 0.26862954,
+ 0.26130258,
+ 0.27577711
+ ],
+ "max_pixels": 12845056,
+ "merge_size": 2,
+ "min_pixels": 3136,
+ "patch_size": 14,
+ "processor_class": "Qwen2VLProcessor",
+ "resample": 3,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "longest_edge": 12845056,
+ "shortest_edge": 3136
+ },
+ "temporal_patch_size": 2
+ }
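The pixel bounds above map directly onto a visual-token budget: with a 14-pixel patch and a 2×2 patch merge, each visual token covers a 28×28 pixel square, so the sketch below shows the resulting limits.

```python
# Sketch: visual-token budget implied by min_pixels / max_pixels above.
patch_size = 14       # "patch_size": 14
merge_size = 2        # "merge_size": 2 -> 2x2 patches are merged into one visual token
pixels_per_token = (patch_size * merge_size) ** 2   # 28 * 28 = 784 pixels per token

min_pixels = 3136       # -> 3136 / 784 = 4 visual tokens minimum
max_pixels = 12845056   # -> 12845056 / 784 = 16384 visual tokens maximum

print(min_pixels // pixels_per_token, max_pixels // pixels_per_token)
```

The image processor rescales each image so its area stays within these bounds, which keeps the visual-token count per image between those two limits.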
running_log.txt ADDED
@@ -0,0 +1,582 @@
1
+ [INFO|2025-03-21 09:41:46] configuration_utils.py:699 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2-VL-7B-Instruct/snapshots/eed13092ef92e448dd6875b2a00151bd3f7db0ac/config.json
2
+
3
+ [INFO|2025-03-21 09:41:46] configuration_utils.py:771 >> Model config Qwen2VLConfig {
4
+ "_name_or_path": "Qwen/Qwen2-VL-7B-Instruct",
5
+ "architectures": [
6
+ "Qwen2VLForConditionalGeneration"
7
+ ],
8
+ "attention_dropout": 0.0,
9
+ "bos_token_id": 151643,
10
+ "eos_token_id": 151645,
11
+ "hidden_act": "silu",
12
+ "hidden_size": 3584,
13
+ "image_token_id": 151655,
14
+ "initializer_range": 0.02,
15
+ "intermediate_size": 18944,
16
+ "max_position_embeddings": 32768,
17
+ "max_window_layers": 28,
18
+ "model_type": "qwen2_vl",
19
+ "num_attention_heads": 28,
20
+ "num_hidden_layers": 28,
21
+ "num_key_value_heads": 4,
22
+ "rms_norm_eps": 1e-06,
23
+ "rope_scaling": {
24
+ "mrope_section": [
25
+ 16,
26
+ 24,
27
+ 24
28
+ ],
29
+ "rope_type": "default",
30
+ "type": "default"
31
+ },
32
+ "rope_theta": 1000000.0,
33
+ "sliding_window": 32768,
34
+ "tie_word_embeddings": false,
35
+ "torch_dtype": "bfloat16",
36
+ "transformers_version": "4.49.0",
37
+ "use_cache": true,
38
+ "use_sliding_window": false,
39
+ "video_token_id": 151656,
40
+ "vision_config": {
41
+ "in_chans": 3,
42
+ "model_type": "qwen2_vl",
43
+ "spatial_patch_size": 14
44
+ },
45
+ "vision_end_token_id": 151653,
46
+ "vision_start_token_id": 151652,
47
+ "vision_token_id": 151654,
48
+ "vocab_size": 152064
49
+ }
50
+
51
+
52
+ [INFO|2025-03-21 09:41:49] tokenization_utils_base.py:2050 >> loading file vocab.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2-VL-7B-Instruct/snapshots/eed13092ef92e448dd6875b2a00151bd3f7db0ac/vocab.json
53
+
54
+ [INFO|2025-03-21 09:41:49] tokenization_utils_base.py:2050 >> loading file merges.txt from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2-VL-7B-Instruct/snapshots/eed13092ef92e448dd6875b2a00151bd3f7db0ac/merges.txt
55
+
56
+ [INFO|2025-03-21 09:41:49] tokenization_utils_base.py:2050 >> loading file tokenizer.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2-VL-7B-Instruct/snapshots/eed13092ef92e448dd6875b2a00151bd3f7db0ac/tokenizer.json
57
+
58
+ [INFO|2025-03-21 09:41:49] tokenization_utils_base.py:2050 >> loading file added_tokens.json from cache at None
59
+
60
+ [INFO|2025-03-21 09:41:49] tokenization_utils_base.py:2050 >> loading file special_tokens_map.json from cache at None
61
+
62
+ [INFO|2025-03-21 09:41:49] tokenization_utils_base.py:2050 >> loading file tokenizer_config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2-VL-7B-Instruct/snapshots/eed13092ef92e448dd6875b2a00151bd3f7db0ac/tokenizer_config.json
63
+
64
+ [INFO|2025-03-21 09:41:49] tokenization_utils_base.py:2050 >> loading file chat_template.jinja from cache at None
65
+
66
+ [INFO|2025-03-21 09:41:49] tokenization_utils_base.py:2313 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
67
+
68
+ [INFO|2025-03-21 09:41:49] image_processing_base.py:381 >> loading configuration file preprocessor_config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2-VL-7B-Instruct/snapshots/eed13092ef92e448dd6875b2a00151bd3f7db0ac/preprocessor_config.json
69
+
70
+ [INFO|2025-03-21 09:41:49] image_processing_base.py:381 >> loading configuration file preprocessor_config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2-VL-7B-Instruct/snapshots/eed13092ef92e448dd6875b2a00151bd3f7db0ac/preprocessor_config.json
71
+
72
+ [WARNING|2025-03-21 09:41:49] logging.py:329 >> Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.48, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.
73
+
74
+ [INFO|2025-03-21 09:41:49] image_processing_base.py:434 >> Image processor Qwen2VLImageProcessor {
75
+ "do_convert_rgb": true,
76
+ "do_normalize": true,
77
+ "do_rescale": true,
78
+ "do_resize": true,
79
+ "image_mean": [
80
+ 0.48145466,
81
+ 0.4578275,
82
+ 0.40821073
83
+ ],
84
+ "image_processor_type": "Qwen2VLImageProcessor",
85
+ "image_std": [
86
+ 0.26862954,
87
+ 0.26130258,
88
+ 0.27577711
89
+ ],
90
+ "max_pixels": 12845056,
91
+ "merge_size": 2,
92
+ "min_pixels": 3136,
93
+ "patch_size": 14,
94
+ "processor_class": "Qwen2VLProcessor",
95
+ "resample": 3,
96
+ "rescale_factor": 0.00392156862745098,
97
+ "size": {
98
+ "longest_edge": 12845056,
99
+ "shortest_edge": 3136
100
+ },
101
+ "temporal_patch_size": 2
102
+ }
103
+
104
+
105
+ [INFO|2025-03-21 09:41:50] tokenization_utils_base.py:2050 >> loading file vocab.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2-VL-7B-Instruct/snapshots/eed13092ef92e448dd6875b2a00151bd3f7db0ac/vocab.json
106
+
107
+ [INFO|2025-03-21 09:41:50] tokenization_utils_base.py:2050 >> loading file merges.txt from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2-VL-7B-Instruct/snapshots/eed13092ef92e448dd6875b2a00151bd3f7db0ac/merges.txt
108
+
109
+ [INFO|2025-03-21 09:41:50] tokenization_utils_base.py:2050 >> loading file tokenizer.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2-VL-7B-Instruct/snapshots/eed13092ef92e448dd6875b2a00151bd3f7db0ac/tokenizer.json
110
+
111
+ [INFO|2025-03-21 09:41:50] tokenization_utils_base.py:2050 >> loading file added_tokens.json from cache at None
112
+
113
+ [INFO|2025-03-21 09:41:50] tokenization_utils_base.py:2050 >> loading file special_tokens_map.json from cache at None
114
+
115
+ [INFO|2025-03-21 09:41:50] tokenization_utils_base.py:2050 >> loading file tokenizer_config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2-VL-7B-Instruct/snapshots/eed13092ef92e448dd6875b2a00151bd3f7db0ac/tokenizer_config.json
116
+
117
+ [INFO|2025-03-21 09:41:50] tokenization_utils_base.py:2050 >> loading file chat_template.jinja from cache at None
118
+
119
+ [INFO|2025-03-21 09:41:50] tokenization_utils_base.py:2313 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
120
+
121
+ [INFO|2025-03-21 09:41:51] processing_utils.py:876 >> Processor Qwen2VLProcessor:
122
+ - image_processor: Qwen2VLImageProcessor {
123
+ "do_convert_rgb": true,
124
+ "do_normalize": true,
125
+ "do_rescale": true,
126
+ "do_resize": true,
127
+ "image_mean": [
128
+ 0.48145466,
129
+ 0.4578275,
130
+ 0.40821073
131
+ ],
132
+ "image_processor_type": "Qwen2VLImageProcessor",
133
+ "image_std": [
134
+ 0.26862954,
135
+ 0.26130258,
136
+ 0.27577711
137
+ ],
138
+ "max_pixels": 12845056,
139
+ "merge_size": 2,
140
+ "min_pixels": 3136,
141
+ "patch_size": 14,
142
+ "processor_class": "Qwen2VLProcessor",
143
+ "resample": 3,
144
+ "rescale_factor": 0.00392156862745098,
145
+ "size": {
146
+ "longest_edge": 12845056,
147
+ "shortest_edge": 3136
148
+ },
149
+ "temporal_patch_size": 2
150
+ }
151
+
152
+ - tokenizer: Qwen2TokenizerFast(name_or_path='Qwen/Qwen2-VL-7B-Instruct', vocab_size=151643, model_max_length=32768, is_fast=True, padding_side='left', truncation_side='right', special_tokens={'eos_token': '<|im_end|>', 'pad_token': '<|endoftext|>', 'additional_special_tokens': ['<|im_start|>', '<|im_end|>', '<|object_ref_start|>', '<|object_ref_end|>', '<|box_start|>', '<|box_end|>', '<|quad_start|>', '<|quad_end|>', '<|vision_start|>', '<|vision_end|>', '<|vision_pad|>', '<|image_pad|>', '<|video_pad|>']}, clean_up_tokenization_spaces=False, added_tokens_decoder={
153
+ 151643: AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
154
+ 151644: AddedToken("<|im_start|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
155
+ 151645: AddedToken("<|im_end|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
156
+ 151646: AddedToken("<|object_ref_start|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
157
+ 151647: AddedToken("<|object_ref_end|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
158
+ 151648: AddedToken("<|box_start|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
159
+ 151649: AddedToken("<|box_end|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
160
+ 151650: AddedToken("<|quad_start|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
161
+ 151651: AddedToken("<|quad_end|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
162
+ 151652: AddedToken("<|vision_start|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
163
+ 151653: AddedToken("<|vision_end|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
164
+ 151654: AddedToken("<|vision_pad|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
165
+ 151655: AddedToken("<|image_pad|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
166
+ 151656: AddedToken("<|video_pad|>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
167
+ }
168
+ )
169
+
170
+ {
171
+ "processor_class": "Qwen2VLProcessor"
172
+ }
173
+
174
+
175
+ [INFO|2025-03-21 09:41:51] logging.py:157 >> Add <|im_end|> to stop words.
176
+
177
+ [INFO|2025-03-21 09:41:51] logging.py:157 >> Loading dataset BsKIE3.json...
178
+
179
+ [INFO|2025-03-21 09:42:04] configuration_utils.py:699 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2-VL-7B-Instruct/snapshots/eed13092ef92e448dd6875b2a00151bd3f7db0ac/config.json
180
+
181
+ [INFO|2025-03-21 09:42:04] configuration_utils.py:771 >> Model config Qwen2VLConfig {
182
+ "_name_or_path": "Qwen/Qwen2-VL-7B-Instruct",
183
+ "architectures": [
184
+ "Qwen2VLForConditionalGeneration"
185
+ ],
186
+ "attention_dropout": 0.0,
187
+ "bos_token_id": 151643,
188
+ "eos_token_id": 151645,
189
+ "hidden_act": "silu",
190
+ "hidden_size": 3584,
191
+ "image_token_id": 151655,
192
+ "initializer_range": 0.02,
193
+ "intermediate_size": 18944,
194
+ "max_position_embeddings": 32768,
195
+ "max_window_layers": 28,
196
+ "model_type": "qwen2_vl",
197
+ "num_attention_heads": 28,
198
+ "num_hidden_layers": 28,
199
+ "num_key_value_heads": 4,
200
+ "rms_norm_eps": 1e-06,
201
+ "rope_scaling": {
202
+ "mrope_section": [
203
+ 16,
204
+ 24,
205
+ 24
206
+ ],
207
+ "rope_type": "default",
208
+ "type": "default"
209
+ },
210
+ "rope_theta": 1000000.0,
211
+ "sliding_window": 32768,
212
+ "tie_word_embeddings": false,
213
+ "torch_dtype": "bfloat16",
214
+ "transformers_version": "4.49.0",
215
+ "use_cache": true,
216
+ "use_sliding_window": false,
217
+ "video_token_id": 151656,
218
+ "vision_config": {
219
+ "in_chans": 3,
220
+ "model_type": "qwen2_vl",
221
+ "spatial_patch_size": 14
222
+ },
223
+ "vision_end_token_id": 151653,
224
+ "vision_start_token_id": 151652,
225
+ "vision_token_id": 151654,
226
+ "vocab_size": 152064
227
+ }
228
+
229
+
230
+ [INFO|2025-03-21 09:42:04] modeling_utils.py:3982 >> loading weights file model.safetensors from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2-VL-7B-Instruct/snapshots/eed13092ef92e448dd6875b2a00151bd3f7db0ac/model.safetensors.index.json
231
+
232
+ [INFO|2025-03-21 09:43:21] modeling_utils.py:1633 >> Instantiating Qwen2VLForConditionalGeneration model under default dtype torch.bfloat16.
233
+
234
+ [INFO|2025-03-21 09:43:21] configuration_utils.py:1140 >> Generate config GenerationConfig {
235
+ "bos_token_id": 151643,
236
+ "eos_token_id": 151645
237
+ }
238
+
239
+
240
+ [INFO|2025-03-21 09:43:21] modeling_utils.py:1633 >> Instantiating Qwen2VisionTransformerPretrainedModel model under default dtype torch.bfloat16.
241
+
242
+ [INFO|2025-03-21 09:43:24] modeling_utils.py:4970 >> All model checkpoint weights were used when initializing Qwen2VLForConditionalGeneration.
243
+
244
+
245
+ [INFO|2025-03-21 09:43:24] modeling_utils.py:4978 >> All the weights of Qwen2VLForConditionalGeneration were initialized from the model checkpoint at Qwen/Qwen2-VL-7B-Instruct.
246
+ If your task is similar to the task the model of the checkpoint was trained on, you can already use Qwen2VLForConditionalGeneration for predictions without further training.
247
+
248
+ [INFO|2025-03-21 09:43:24] configuration_utils.py:1095 >> loading configuration file generation_config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2-VL-7B-Instruct/snapshots/eed13092ef92e448dd6875b2a00151bd3f7db0ac/generation_config.json
249
+
250
+ [INFO|2025-03-21 09:43:24] configuration_utils.py:1140 >> Generate config GenerationConfig {
251
+ "bos_token_id": 151643,
252
+ "do_sample": true,
253
+ "eos_token_id": [
254
+ 151645,
255
+ 151643
256
+ ],
257
+ "pad_token_id": 151643,
258
+ "temperature": 0.01,
259
+ "top_k": 1,
260
+ "top_p": 0.001
261
+ }
262
+
263
+
264
+ [INFO|2025-03-21 09:43:24] logging.py:157 >> Gradient checkpointing enabled.
265
+
266
+ [INFO|2025-03-21 09:43:24] logging.py:157 >> Using torch SDPA for faster training and inference.
267
+
268
+ [INFO|2025-03-21 09:43:24] logging.py:157 >> Upcasting trainable params to float32.
269
+
270
+ [INFO|2025-03-21 09:43:24] logging.py:157 >> Fine-tuning method: LoRA
271
+
272
+ [INFO|2025-03-21 09:43:24] logging.py:157 >> Found linear modules: k_proj,gate_proj,v_proj,o_proj,q_proj,up_proj,down_proj
273
+
274
+ [INFO|2025-03-21 09:43:24] logging.py:157 >> Set vision model not trainable: ['visual.patch_embed', 'visual.blocks'].
275
+
276
+ [INFO|2025-03-21 09:43:24] logging.py:157 >> Set multi model projector not trainable: visual.merger.
277
+
278
+ [INFO|2025-03-21 09:43:25] logging.py:157 >> trainable params: 20,185,088 || all params: 8,311,560,704 || trainable%: 0.2429
279
+
280
+ [INFO|2025-03-21 09:43:25] trainer.py:746 >> Using auto half precision backend
281
+
282
+ [WARNING|2025-03-21 09:43:25] trainer.py:781 >> No label_names provided for model class `PeftModelForCausalLM`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.
283
+
284
+ [INFO|2025-03-21 09:43:25] trainer.py:2405 >> ***** Running training *****
285
+
286
+ [INFO|2025-03-21 09:43:25] trainer.py:2406 >> Num examples = 1,050
287
+
288
+ [INFO|2025-03-21 09:43:25] trainer.py:2407 >> Num Epochs = 3
289
+
290
+ [INFO|2025-03-21 09:43:25] trainer.py:2408 >> Instantaneous batch size per device = 2
291
+
292
+ [INFO|2025-03-21 09:43:25] trainer.py:2411 >> Total train batch size (w. parallel, distributed & accumulation) = 16
293
+
294
+ [INFO|2025-03-21 09:43:25] trainer.py:2412 >> Gradient Accumulation steps = 8
295
+
296
+ [INFO|2025-03-21 09:43:25] trainer.py:2413 >> Total optimization steps = 195
297
+
298
+ [INFO|2025-03-21 09:43:25] trainer.py:2414 >> Number of trainable parameters = 20,185,088
299
+
300
+ [INFO|2025-03-21 09:45:00] logging.py:157 >> {'loss': 0.2783, 'learning_rate': 4.9919e-05, 'epoch': 0.08, 'throughput': 2222.37}
301
+
302
+ [INFO|2025-03-21 09:46:36] logging.py:157 >> {'loss': 0.1989, 'learning_rate': 4.9676e-05, 'epoch': 0.15, 'throughput': 2194.13}
303
+
304
+ [INFO|2025-03-21 09:48:17] logging.py:157 >> {'loss': 0.1331, 'learning_rate': 4.9274e-05, 'epoch': 0.23, 'throughput': 2167.74}
305
+
306
+ [INFO|2025-03-21 09:49:53] logging.py:157 >> {'loss': 0.1029, 'learning_rate': 4.8713e-05, 'epoch': 0.30, 'throughput': 2159.97}
307
+
308
+ [INFO|2025-03-21 09:51:29] logging.py:157 >> {'loss': 0.0800, 'learning_rate': 4.7999e-05, 'epoch': 0.38, 'throughput': 2155.84}
309
+
310
+ [INFO|2025-03-21 09:53:04] logging.py:157 >> {'loss': 0.0767, 'learning_rate': 4.7136e-05, 'epoch': 0.46, 'throughput': 2151.49}
311
+
312
+ [INFO|2025-03-21 09:54:36] logging.py:157 >> {'loss': 0.0596, 'learning_rate': 4.6130e-05, 'epoch': 0.53, 'throughput': 2154.20}
313
+
314
+ [INFO|2025-03-21 09:56:13] logging.py:157 >> {'loss': 0.0619, 'learning_rate': 4.4986e-05, 'epoch': 0.61, 'throughput': 2151.53}
315
+
316
+ [INFO|2025-03-21 09:57:44] logging.py:157 >> {'loss': 0.0585, 'learning_rate': 4.3713e-05, 'epoch': 0.69, 'throughput': 2154.83}
317
+
318
+ [INFO|2025-03-21 09:59:21] logging.py:157 >> {'loss': 0.0642, 'learning_rate': 4.2318e-05, 'epoch': 0.76, 'throughput': 2151.56}
319
+
320
+ [INFO|2025-03-21 10:00:58] logging.py:157 >> {'loss': 0.0530, 'learning_rate': 4.0811e-05, 'epoch': 0.84, 'throughput': 2151.91}
321
+
322
+ [INFO|2025-03-21 10:02:34] logging.py:157 >> {'loss': 0.0566, 'learning_rate': 3.9202e-05, 'epoch': 0.91, 'throughput': 2151.74}
323
+
324
+ [INFO|2025-03-21 10:04:09] logging.py:157 >> {'loss': 0.0496, 'learning_rate': 3.7500e-05, 'epoch': 0.99, 'throughput': 2152.63}
325
+
326
+ [INFO|2025-03-21 10:05:52] logging.py:157 >> {'loss': 0.0577, 'learning_rate': 3.5717e-05, 'epoch': 1.08, 'throughput': 2151.34}
327
+
328
+ [INFO|2025-03-21 10:07:18] logging.py:157 >> {'loss': 0.0442, 'learning_rate': 3.3865e-05, 'epoch': 1.15, 'throughput': 2155.99}
329
+
330
+ [INFO|2025-03-21 10:08:57] logging.py:157 >> {'loss': 0.0469, 'learning_rate': 3.1955e-05, 'epoch': 1.23, 'throughput': 2152.47}
331
+
332
+ [INFO|2025-03-21 10:10:36] logging.py:157 >> {'loss': 0.0417, 'learning_rate': 3.0001e-05, 'epoch': 1.30, 'throughput': 2151.23}
333
+
334
+ [INFO|2025-03-21 10:12:08] logging.py:157 >> {'loss': 0.0451, 'learning_rate': 2.8013e-05, 'epoch': 1.38, 'throughput': 2151.68}
335
+
336
+ [INFO|2025-03-21 10:13:45] logging.py:157 >> {'loss': 0.0438, 'learning_rate': 2.6007e-05, 'epoch': 1.46, 'throughput': 2150.08}
337
+
338
+ [INFO|2025-03-21 10:15:23] logging.py:157 >> {'loss': 0.0380, 'learning_rate': 2.3993e-05, 'epoch': 1.53, 'throughput': 2149.01}
339
+
340
+ [INFO|2025-03-21 10:15:23] trainer.py:3942 >> Saving model checkpoint to saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora/checkpoint-100
341
+
342
+ [INFO|2025-03-21 10:15:27] configuration_utils.py:699 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2-VL-7B-Instruct/snapshots/eed13092ef92e448dd6875b2a00151bd3f7db0ac/config.json
343
+
344
+ [INFO|2025-03-21 10:15:27] configuration_utils.py:771 >> Model config Qwen2VLConfig {
345
+ "architectures": [
346
+ "Qwen2VLForConditionalGeneration"
347
+ ],
348
+ "attention_dropout": 0.0,
349
+ "bos_token_id": 151643,
350
+ "eos_token_id": 151645,
351
+ "hidden_act": "silu",
352
+ "hidden_size": 3584,
353
+ "image_token_id": 151655,
354
+ "initializer_range": 0.02,
355
+ "intermediate_size": 18944,
356
+ "max_position_embeddings": 32768,
357
+ "max_window_layers": 28,
358
+ "model_type": "qwen2_vl",
359
+ "num_attention_heads": 28,
360
+ "num_hidden_layers": 28,
361
+ "num_key_value_heads": 4,
362
+ "rms_norm_eps": 1e-06,
363
+ "rope_scaling": {
364
+ "mrope_section": [
365
+ 16,
366
+ 24,
367
+ 24
368
+ ],
369
+ "rope_type": "default",
370
+ "type": "default"
371
+ },
372
+ "rope_theta": 1000000.0,
373
+ "sliding_window": 32768,
374
+ "tie_word_embeddings": false,
375
+ "torch_dtype": "bfloat16",
376
+ "transformers_version": "4.49.0",
377
+ "use_cache": true,
378
+ "use_sliding_window": false,
379
+ "video_token_id": 151656,
380
+ "vision_config": {
381
+ "in_chans": 3,
382
+ "model_type": "qwen2_vl",
383
+ "spatial_patch_size": 14
384
+ },
385
+ "vision_end_token_id": 151653,
386
+ "vision_start_token_id": 151652,
387
+ "vision_token_id": 151654,
388
+ "vocab_size": 152064
389
+ }
390
+
391
+
392
+ [INFO|2025-03-21 10:15:27] tokenization_utils_base.py:2500 >> tokenizer config file saved in saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora/checkpoint-100/tokenizer_config.json
393
+
394
+ [INFO|2025-03-21 10:15:27] tokenization_utils_base.py:2509 >> Special tokens file saved in saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora/checkpoint-100/special_tokens_map.json
395
+
396
+ [INFO|2025-03-21 10:15:27] image_processing_base.py:261 >> Image processor saved in saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora/checkpoint-100/preprocessor_config.json
397
+
398
+ [INFO|2025-03-21 10:15:27] tokenization_utils_base.py:2500 >> tokenizer config file saved in saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora/checkpoint-100/tokenizer_config.json
399
+
400
+ [INFO|2025-03-21 10:15:27] tokenization_utils_base.py:2509 >> Special tokens file saved in saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora/checkpoint-100/special_tokens_map.json
401
+
402
+ [INFO|2025-03-21 10:15:27] processing_utils.py:638 >> chat template saved in saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora/checkpoint-100/chat_template.json
403
+
404
+ [INFO|2025-03-21 10:17:03] logging.py:157 >> {'loss': 0.0380, 'learning_rate': 2.1987e-05, 'epoch': 1.61, 'throughput': 2144.53}
405
+
406
+ [INFO|2025-03-21 10:18:43] logging.py:157 >> {'loss': 0.0397, 'learning_rate': 1.9999e-05, 'epoch': 1.69, 'throughput': 2142.88}
407
+
408
+ [INFO|2025-03-21 10:20:19] logging.py:157 >> {'loss': 0.0404, 'learning_rate': 1.8045e-05, 'epoch': 1.76, 'throughput': 2142.72}
409
+
410
+ [INFO|2025-03-21 10:21:58] logging.py:157 >> {'loss': 0.0362, 'learning_rate': 1.6135e-05, 'epoch': 1.84, 'throughput': 2142.84}
411
+
412
+ [INFO|2025-03-21 10:23:28] logging.py:157 >> {'loss': 0.0397, 'learning_rate': 1.4283e-05, 'epoch': 1.91, 'throughput': 2145.15}
413
+
414
+ [INFO|2025-03-21 10:25:02] logging.py:157 >> {'loss': 0.0343, 'learning_rate': 1.2500e-05, 'epoch': 1.99, 'throughput': 2146.04}
415
+
416
+ [INFO|2025-03-21 10:26:45] logging.py:157 >> {'loss': 0.0396, 'learning_rate': 1.0798e-05, 'epoch': 2.08, 'throughput': 2145.80}
417
+
418
+ [INFO|2025-03-21 10:28:14] logging.py:157 >> {'loss': 0.0394, 'learning_rate': 9.1889e-06, 'epoch': 2.15, 'throughput': 2147.67}
419
+
420
+ [INFO|2025-03-21 10:29:54] logging.py:157 >> {'loss': 0.0311, 'learning_rate': 7.6819e-06, 'epoch': 2.23, 'throughput': 2146.29}
421
+
422
+ [INFO|2025-03-21 10:31:26] logging.py:157 >> {'loss': 0.0480, 'learning_rate': 6.2872e-06, 'epoch': 2.30, 'throughput': 2147.33}
423
+
424
+ [INFO|2025-03-21 10:33:00] logging.py:157 >> {'loss': 0.0374, 'learning_rate': 5.0139e-06, 'epoch': 2.38, 'throughput': 2148.07}
425
+
426
+ [INFO|2025-03-21 10:34:34] logging.py:157 >> {'loss': 0.0334, 'learning_rate': 3.8702e-06, 'epoch': 2.46, 'throughput': 2148.53}
427
+
428
+ [INFO|2025-03-21 10:36:10] logging.py:157 >> {'loss': 0.0324, 'learning_rate': 2.8636e-06, 'epoch': 2.53, 'throughput': 2148.97}
429
+
430
+ [INFO|2025-03-21 10:37:44] logging.py:157 >> {'loss': 0.0342, 'learning_rate': 2.0005e-06, 'epoch': 2.61, 'throughput': 2149.31}
431
+
432
+ [INFO|2025-03-21 10:39:19] logging.py:157 >> {'loss': 0.0384, 'learning_rate': 1.2866e-06, 'epoch': 2.69, 'throughput': 2149.34}
433
+
434
+ [INFO|2025-03-21 10:40:58] logging.py:157 >> {'loss': 0.0357, 'learning_rate': 7.2645e-07, 'epoch': 2.76, 'throughput': 2148.19}
435
+
436
+ [INFO|2025-03-21 10:42:43] logging.py:157 >> {'loss': 0.0351, 'learning_rate': 3.2374e-07, 'epoch': 2.84, 'throughput': 2145.47}
437
+
438
+ [INFO|2025-03-21 10:44:18] logging.py:157 >> {'loss': 0.0324, 'learning_rate': 8.1067e-08, 'epoch': 2.91, 'throughput': 2146.04}
439
+
440
+ [INFO|2025-03-21 10:45:51] logging.py:157 >> {'loss': 0.0357, 'learning_rate': 0.0000e+00, 'epoch': 2.99, 'throughput': 2146.55}
441
+
442
+ [INFO|2025-03-21 10:45:51] trainer.py:3942 >> Saving model checkpoint to saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora/checkpoint-195
443
+
444
+ [INFO|2025-03-21 10:45:51] configuration_utils.py:699 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2-VL-7B-Instruct/snapshots/eed13092ef92e448dd6875b2a00151bd3f7db0ac/config.json
445
+
446
+ [INFO|2025-03-21 10:45:51] configuration_utils.py:771 >> Model config Qwen2VLConfig {
447
+ "architectures": [
448
+ "Qwen2VLForConditionalGeneration"
449
+ ],
450
+ "attention_dropout": 0.0,
451
+ "bos_token_id": 151643,
452
+ "eos_token_id": 151645,
453
+ "hidden_act": "silu",
454
+ "hidden_size": 3584,
455
+ "image_token_id": 151655,
456
+ "initializer_range": 0.02,
457
+ "intermediate_size": 18944,
458
+ "max_position_embeddings": 32768,
459
+ "max_window_layers": 28,
460
+ "model_type": "qwen2_vl",
461
+ "num_attention_heads": 28,
462
+ "num_hidden_layers": 28,
463
+ "num_key_value_heads": 4,
464
+ "rms_norm_eps": 1e-06,
465
+ "rope_scaling": {
466
+ "mrope_section": [
467
+ 16,
468
+ 24,
469
+ 24
470
+ ],
471
+ "rope_type": "default",
472
+ "type": "default"
473
+ },
474
+ "rope_theta": 1000000.0,
475
+ "sliding_window": 32768,
476
+ "tie_word_embeddings": false,
477
+ "torch_dtype": "bfloat16",
478
+ "transformers_version": "4.49.0",
479
+ "use_cache": true,
480
+ "use_sliding_window": false,
481
+ "video_token_id": 151656,
482
+ "vision_config": {
483
+ "in_chans": 3,
484
+ "model_type": "qwen2_vl",
485
+ "spatial_patch_size": 14
486
+ },
487
+ "vision_end_token_id": 151653,
488
+ "vision_start_token_id": 151652,
489
+ "vision_token_id": 151654,
490
+ "vocab_size": 152064
491
+ }
492
+
493
+
494
+ [INFO|2025-03-21 10:45:51] tokenization_utils_base.py:2500 >> tokenizer config file saved in saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora/checkpoint-195/tokenizer_config.json
495
+
496
+ [INFO|2025-03-21 10:45:51] tokenization_utils_base.py:2509 >> Special tokens file saved in saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora/checkpoint-195/special_tokens_map.json
497
+
498
+ [INFO|2025-03-21 10:45:51] image_processing_base.py:261 >> Image processor saved in saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora/checkpoint-195/preprocessor_config.json
499
+
500
+ [INFO|2025-03-21 10:45:51] tokenization_utils_base.py:2500 >> tokenizer config file saved in saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora/checkpoint-195/tokenizer_config.json
501
+
502
+ [INFO|2025-03-21 10:45:51] tokenization_utils_base.py:2509 >> Special tokens file saved in saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora/checkpoint-195/special_tokens_map.json
503
+
504
+ [INFO|2025-03-21 10:45:52] processing_utils.py:638 >> chat template saved in saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora/checkpoint-195/chat_template.json
505
+
506
+ [INFO|2025-03-21 10:45:52] trainer.py:2657 >>
507
+
508
+ Training completed. Do not forget to share your model on huggingface.co/models =)
509
+
510
+
511
+
512
+ [INFO|2025-03-21 10:45:52] image_processing_base.py:261 >> Image processor saved in saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora/preprocessor_config.json
513
+
514
+ [INFO|2025-03-21 10:45:52] tokenization_utils_base.py:2500 >> tokenizer config file saved in saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora/tokenizer_config.json
515
+
516
+ [INFO|2025-03-21 10:45:52] tokenization_utils_base.py:2509 >> Special tokens file saved in saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora/special_tokens_map.json
517
+
518
+ [INFO|2025-03-21 10:45:52] processing_utils.py:638 >> chat template saved in saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora/chat_template.json
519
+
520
+ [INFO|2025-03-21 10:45:52] trainer.py:3942 >> Saving model checkpoint to saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora
521
+
522
+ [INFO|2025-03-21 10:45:52] configuration_utils.py:699 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2-VL-7B-Instruct/snapshots/eed13092ef92e448dd6875b2a00151bd3f7db0ac/config.json
523
+
524
+ [INFO|2025-03-21 10:45:52] configuration_utils.py:771 >> Model config Qwen2VLConfig {
525
+ "architectures": [
526
+ "Qwen2VLForConditionalGeneration"
527
+ ],
528
+ "attention_dropout": 0.0,
529
+ "bos_token_id": 151643,
530
+ "eos_token_id": 151645,
531
+ "hidden_act": "silu",
532
+ "hidden_size": 3584,
533
+ "image_token_id": 151655,
534
+ "initializer_range": 0.02,
535
+ "intermediate_size": 18944,
536
+ "max_position_embeddings": 32768,
537
+ "max_window_layers": 28,
538
+ "model_type": "qwen2_vl",
539
+ "num_attention_heads": 28,
540
+ "num_hidden_layers": 28,
541
+ "num_key_value_heads": 4,
542
+ "rms_norm_eps": 1e-06,
543
+ "rope_scaling": {
544
+ "mrope_section": [
545
+ 16,
546
+ 24,
547
+ 24
548
+ ],
549
+ "rope_type": "default",
550
+ "type": "default"
551
+ },
552
+ "rope_theta": 1000000.0,
553
+ "sliding_window": 32768,
554
+ "tie_word_embeddings": false,
555
+ "torch_dtype": "bfloat16",
556
+ "transformers_version": "4.49.0",
557
+ "use_cache": true,
558
+ "use_sliding_window": false,
559
+ "video_token_id": 151656,
560
+ "vision_config": {
561
+ "in_chans": 3,
562
+ "model_type": "qwen2_vl",
563
+ "spatial_patch_size": 14
564
+ },
565
+ "vision_end_token_id": 151653,
566
+ "vision_start_token_id": 151652,
567
+ "vision_token_id": 151654,
568
+ "vocab_size": 152064
569
+ }
570
+
571
+
572
+ [INFO|2025-03-21 10:45:52] tokenization_utils_base.py:2500 >> tokenizer config file saved in saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora/tokenizer_config.json
573
+
574
+ [INFO|2025-03-21 10:45:52] tokenization_utils_base.py:2509 >> Special tokens file saved in saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora/special_tokens_map.json
575
+
576
+ [WARNING|2025-03-21 10:45:52] logging.py:162 >> No metric eval_loss to plot.
577
+
578
+ [WARNING|2025-03-21 10:45:52] logging.py:162 >> No metric eval_accuracy to plot.
579
+
580
+ [INFO|2025-03-21 10:45:52] modelcard.py:449 >> Dropping the following result as it does not have all the necessary fields:
581
+ {'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
582
+
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:091aa7594dc2fcfbfa06b9e3c22a5f0562ac14f30375c13af7309407a0e67b8a
+ size 11420371
tokenizer_config.json ADDED
@@ -0,0 +1,145 @@
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "151643": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "151644": {
13
+ "content": "<|im_start|>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "151645": {
21
+ "content": "<|im_end|>",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "151646": {
29
+ "content": "<|object_ref_start|>",
30
+ "lstrip": false,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ },
36
+ "151647": {
37
+ "content": "<|object_ref_end|>",
38
+ "lstrip": false,
39
+ "normalized": false,
40
+ "rstrip": false,
41
+ "single_word": false,
42
+ "special": true
43
+ },
44
+ "151648": {
45
+ "content": "<|box_start|>",
46
+ "lstrip": false,
47
+ "normalized": false,
48
+ "rstrip": false,
49
+ "single_word": false,
50
+ "special": true
51
+ },
52
+ "151649": {
53
+ "content": "<|box_end|>",
54
+ "lstrip": false,
55
+ "normalized": false,
56
+ "rstrip": false,
57
+ "single_word": false,
58
+ "special": true
59
+ },
60
+ "151650": {
61
+ "content": "<|quad_start|>",
62
+ "lstrip": false,
63
+ "normalized": false,
64
+ "rstrip": false,
65
+ "single_word": false,
66
+ "special": true
67
+ },
68
+ "151651": {
69
+ "content": "<|quad_end|>",
70
+ "lstrip": false,
71
+ "normalized": false,
72
+ "rstrip": false,
73
+ "single_word": false,
74
+ "special": true
75
+ },
76
+ "151652": {
77
+ "content": "<|vision_start|>",
78
+ "lstrip": false,
79
+ "normalized": false,
80
+ "rstrip": false,
81
+ "single_word": false,
82
+ "special": true
83
+ },
84
+ "151653": {
85
+ "content": "<|vision_end|>",
86
+ "lstrip": false,
87
+ "normalized": false,
88
+ "rstrip": false,
89
+ "single_word": false,
90
+ "special": true
91
+ },
92
+ "151654": {
93
+ "content": "<|vision_pad|>",
94
+ "lstrip": false,
95
+ "normalized": false,
96
+ "rstrip": false,
97
+ "single_word": false,
98
+ "special": true
99
+ },
100
+ "151655": {
101
+ "content": "<|image_pad|>",
102
+ "lstrip": false,
103
+ "normalized": false,
104
+ "rstrip": false,
105
+ "single_word": false,
106
+ "special": true
107
+ },
108
+ "151656": {
109
+ "content": "<|video_pad|>",
110
+ "lstrip": false,
111
+ "normalized": false,
112
+ "rstrip": false,
113
+ "single_word": false,
114
+ "special": true
115
+ }
116
+ },
117
+ "additional_special_tokens": [
118
+ "<|im_start|>",
119
+ "<|im_end|>",
120
+ "<|object_ref_start|>",
121
+ "<|object_ref_end|>",
122
+ "<|box_start|>",
123
+ "<|box_end|>",
124
+ "<|quad_start|>",
125
+ "<|quad_end|>",
126
+ "<|vision_start|>",
127
+ "<|vision_end|>",
128
+ "<|vision_pad|>",
129
+ "<|image_pad|>",
130
+ "<|video_pad|>"
131
+ ],
132
+ "bos_token": null,
133
+ "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}",
134
+ "clean_up_tokenization_spaces": false,
135
+ "eos_token": "<|im_end|>",
136
+ "errors": "replace",
137
+ "extra_special_tokens": {},
138
+ "model_max_length": 34114,
139
+ "pad_token": "<|endoftext|>",
140
+ "padding_side": "right",
141
+ "processor_class": "Qwen2VLProcessor",
142
+ "split_special_tokens": false,
143
+ "tokenizer_class": "Qwen2Tokenizer",
144
+ "unk_token": null
145
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 2.9904761904761905,
+ "num_input_tokens_seen": 8039616,
+ "total_flos": 3.746411207808123e+17,
+ "train_loss": 0.05875852833955716,
+ "train_runtime": 3746.4527,
+ "train_samples_per_second": 0.841,
+ "train_steps_per_second": 0.052
+ }
trainer_log.jsonl ADDED
@@ -0,0 +1,40 @@
+ {"current_steps": 5, "total_steps": 195, "loss": 0.2783, "lr": 4.9918932703355256e-05, "epoch": 0.0761904761904762, "percentage": 2.56, "elapsed_time": "0:01:35", "remaining_time": "1:00:12", "throughput": 2222.37, "total_tokens": 211280}
+ {"current_steps": 10, "total_steps": 195, "loss": 0.1989, "lr": 4.967625656594782e-05, "epoch": 0.1523809523809524, "percentage": 5.13, "elapsed_time": "0:03:11", "remaining_time": "0:58:53", "throughput": 2194.13, "total_tokens": 419136}
+ {"current_steps": 15, "total_steps": 195, "loss": 0.1331, "lr": 4.92735454356513e-05, "epoch": 0.22857142857142856, "percentage": 7.69, "elapsed_time": "0:04:51", "remaining_time": "0:58:16", "throughput": 2167.74, "total_tokens": 631632}
+ {"current_steps": 20, "total_steps": 195, "loss": 0.1029, "lr": 4.8713411048678635e-05, "epoch": 0.3047619047619048, "percentage": 10.26, "elapsed_time": "0:06:27", "remaining_time": "0:56:30", "throughput": 2159.97, "total_tokens": 836896}
+ {"current_steps": 25, "total_steps": 195, "loss": 0.08, "lr": 4.799948609147061e-05, "epoch": 0.38095238095238093, "percentage": 12.82, "elapsed_time": "0:08:03", "remaining_time": "0:54:48", "throughput": 2155.84, "total_tokens": 1042416}
+ {"current_steps": 30, "total_steps": 195, "loss": 0.0767, "lr": 4.713640064133025e-05, "epoch": 0.45714285714285713, "percentage": 15.38, "elapsed_time": "0:09:38", "remaining_time": "0:53:01", "throughput": 2151.49, "total_tokens": 1244384}
+ {"current_steps": 35, "total_steps": 195, "loss": 0.0596, "lr": 4.6129752138594874e-05, "epoch": 0.5333333333333333, "percentage": 17.95, "elapsed_time": "0:11:10", "remaining_time": "0:51:06", "throughput": 2154.2, "total_tokens": 1445168}
+ {"current_steps": 40, "total_steps": 195, "loss": 0.0619, "lr": 4.498606908508754e-05, "epoch": 0.6095238095238096, "percentage": 20.51, "elapsed_time": "0:12:47", "remaining_time": "0:49:34", "throughput": 2151.53, "total_tokens": 1651760}
+ {"current_steps": 45, "total_steps": 195, "loss": 0.0585, "lr": 4.371276870427753e-05, "epoch": 0.6857142857142857, "percentage": 23.08, "elapsed_time": "0:14:18", "remaining_time": "0:47:41", "throughput": 2154.83, "total_tokens": 1849920}
+ {"current_steps": 50, "total_steps": 195, "loss": 0.0642, "lr": 4.231810883773999e-05, "epoch": 0.7619047619047619, "percentage": 25.64, "elapsed_time": "0:15:55", "remaining_time": "0:46:11", "throughput": 2151.56, "total_tokens": 2056592}
+ {"current_steps": 55, "total_steps": 195, "loss": 0.053, "lr": 4.0811134389884433e-05, "epoch": 0.8380952380952381, "percentage": 28.21, "elapsed_time": "0:17:32", "remaining_time": "0:44:39", "throughput": 2151.91, "total_tokens": 2265056}
+ {"current_steps": 60, "total_steps": 195, "loss": 0.0566, "lr": 3.920161866827889e-05, "epoch": 0.9142857142857143, "percentage": 30.77, "elapsed_time": "0:19:08", "remaining_time": "0:43:04", "throughput": 2151.74, "total_tokens": 2471328}
+ {"current_steps": 65, "total_steps": 195, "loss": 0.0496, "lr": 3.7500000000000003e-05, "epoch": 0.9904761904761905, "percentage": 33.33, "elapsed_time": "0:20:43", "remaining_time": "0:41:26", "throughput": 2152.63, "total_tokens": 2676640}
+ {"current_steps": 70, "total_steps": 195, "loss": 0.0577, "lr": 3.5717314035076355e-05, "epoch": 1.0761904761904761, "percentage": 35.9, "elapsed_time": "0:22:26", "remaining_time": "0:40:05", "throughput": 2151.34, "total_tokens": 2897712}
+ {"current_steps": 75, "total_steps": 195, "loss": 0.0442, "lr": 3.386512217606339e-05, "epoch": 1.1523809523809523, "percentage": 38.46, "elapsed_time": "0:23:53", "remaining_time": "0:38:12", "throughput": 2155.99, "total_tokens": 3089808}
+ {"current_steps": 80, "total_steps": 195, "loss": 0.0469, "lr": 3.195543659791132e-05, "epoch": 1.2285714285714286, "percentage": 41.03, "elapsed_time": "0:25:31", "remaining_time": "0:36:41", "throughput": 2152.47, "total_tokens": 3296576}
+ {"current_steps": 85, "total_steps": 195, "loss": 0.0417, "lr": 3.0000642344401113e-05, "epoch": 1.3047619047619048, "percentage": 43.59, "elapsed_time": "0:27:10", "remaining_time": "0:35:10", "throughput": 2151.23, "total_tokens": 3508416}
+ {"current_steps": 90, "total_steps": 195, "loss": 0.0451, "lr": 2.8013417006383076e-05, "epoch": 1.380952380952381, "percentage": 46.15, "elapsed_time": "0:28:42", "remaining_time": "0:33:29", "throughput": 2151.68, "total_tokens": 3706512}
+ {"current_steps": 95, "total_steps": 195, "loss": 0.0438, "lr": 2.600664850273538e-05, "epoch": 1.457142857142857, "percentage": 48.72, "elapsed_time": "0:30:19", "remaining_time": "0:31:55", "throughput": 2150.08, "total_tokens": 3913120}
+ {"current_steps": 100, "total_steps": 195, "loss": 0.038, "lr": 2.399335149726463e-05, "epoch": 1.5333333333333332, "percentage": 51.28, "elapsed_time": "0:31:57", "remaining_time": "0:30:21", "throughput": 2149.01, "total_tokens": 4121248}
+ {"current_steps": 105, "total_steps": 195, "loss": 0.038, "lr": 2.1986582993616926e-05, "epoch": 1.6095238095238096, "percentage": 53.85, "elapsed_time": "0:33:38", "remaining_time": "0:28:49", "throughput": 2144.53, "total_tokens": 4327824}
+ {"current_steps": 110, "total_steps": 195, "loss": 0.0397, "lr": 1.9999357655598893e-05, "epoch": 1.6857142857142857, "percentage": 56.41, "elapsed_time": "0:35:17", "remaining_time": "0:27:16", "throughput": 2142.88, "total_tokens": 4537936}
+ {"current_steps": 115, "total_steps": 195, "loss": 0.0404, "lr": 1.8044563402088684e-05, "epoch": 1.7619047619047619, "percentage": 58.97, "elapsed_time": "0:36:54", "remaining_time": "0:25:40", "throughput": 2142.72, "total_tokens": 4744432}
+ {"current_steps": 120, "total_steps": 195, "loss": 0.0362, "lr": 1.613487782393661e-05, "epoch": 1.8380952380952382, "percentage": 61.54, "elapsed_time": "0:38:32", "remaining_time": "0:24:05", "throughput": 2142.84, "total_tokens": 4955296}
+ {"current_steps": 125, "total_steps": 195, "loss": 0.0397, "lr": 1.4282685964923642e-05, "epoch": 1.9142857142857141, "percentage": 64.1, "elapsed_time": "0:40:02", "remaining_time": "0:22:25", "throughput": 2145.15, "total_tokens": 5154784}
+ {"current_steps": 130, "total_steps": 195, "loss": 0.0343, "lr": 1.2500000000000006e-05, "epoch": 1.9904761904761905, "percentage": 66.67, "elapsed_time": "0:41:36", "remaining_time": "0:20:48", "throughput": 2146.04, "total_tokens": 5358160}
+ {"current_steps": 135, "total_steps": 195, "loss": 0.0396, "lr": 1.0798381331721109e-05, "epoch": 2.0761904761904764, "percentage": 69.23, "elapsed_time": "0:43:19", "remaining_time": "0:19:15", "throughput": 2145.8, "total_tokens": 5577808}
+ {"current_steps": 140, "total_steps": 195, "loss": 0.0394, "lr": 9.18886561011557e-06, "epoch": 2.1523809523809523, "percentage": 71.79, "elapsed_time": "0:44:49", "remaining_time": "0:17:36", "throughput": 2147.67, "total_tokens": 5775552}
+ {"current_steps": 145, "total_steps": 195, "loss": 0.0311, "lr": 7.681891162260015e-06, "epoch": 2.2285714285714286, "percentage": 74.36, "elapsed_time": "0:46:29", "remaining_time": "0:16:01", "throughput": 2146.29, "total_tokens": 5986112}
+ {"current_steps": 150, "total_steps": 195, "loss": 0.048, "lr": 6.28723129572247e-06, "epoch": 2.3047619047619046, "percentage": 76.92, "elapsed_time": "0:48:00", "remaining_time": "0:14:24", "throughput": 2147.33, "total_tokens": 6185568}
+ {"current_steps": 155, "total_steps": 195, "loss": 0.0374, "lr": 5.013930914912476e-06, "epoch": 2.380952380952381, "percentage": 79.49, "elapsed_time": "0:49:34", "remaining_time": "0:12:47", "throughput": 2148.07, "total_tokens": 6389392}
+ {"current_steps": 160, "total_steps": 195, "loss": 0.0334, "lr": 3.8702478614051355e-06, "epoch": 2.4571428571428573, "percentage": 82.05, "elapsed_time": "0:51:09", "remaining_time": "0:11:11", "throughput": 2148.53, "total_tokens": 6594256}
+ {"current_steps": 165, "total_steps": 195, "loss": 0.0324, "lr": 2.8635993586697553e-06, "epoch": 2.533333333333333, "percentage": 84.62, "elapsed_time": "0:52:45", "remaining_time": "0:09:35", "throughput": 2148.97, "total_tokens": 6802240}
+ {"current_steps": 170, "total_steps": 195, "loss": 0.0342, "lr": 2.0005139085293945e-06, "epoch": 2.6095238095238096, "percentage": 87.18, "elapsed_time": "0:54:19", "remaining_time": "0:07:59", "throughput": 2149.31, "total_tokens": 7004816}
+ {"current_steps": 175, "total_steps": 195, "loss": 0.0384, "lr": 1.286588951321363e-06, "epoch": 2.685714285714286, "percentage": 89.74, "elapsed_time": "0:55:53", "remaining_time": "0:06:23", "throughput": 2149.34, "total_tokens": 7208608}
+ {"current_steps": 180, "total_steps": 195, "loss": 0.0357, "lr": 7.264545643486997e-07, "epoch": 2.761904761904762, "percentage": 92.31, "elapsed_time": "0:57:33", "remaining_time": "0:04:47", "throughput": 2148.19, "total_tokens": 7418320}
+ {"current_steps": 185, "total_steps": 195, "loss": 0.0351, "lr": 3.237434340521789e-07, "epoch": 2.8380952380952382, "percentage": 94.87, "elapsed_time": "0:59:17", "remaining_time": "0:03:12", "throughput": 2145.47, "total_tokens": 7633248}
+ {"current_steps": 190, "total_steps": 195, "loss": 0.0324, "lr": 8.106729664475176e-08, "epoch": 2.914285714285714, "percentage": 97.44, "elapsed_time": "1:00:52", "remaining_time": "0:01:36", "throughput": 2146.04, "total_tokens": 7838576}
+ {"current_steps": 195, "total_steps": 195, "loss": 0.0357, "lr": 0.0, "epoch": 2.9904761904761905, "percentage": 100.0, "elapsed_time": "1:02:25", "remaining_time": "0:00:00", "throughput": 2146.55, "total_tokens": 8039616}
+ {"current_steps": 195, "total_steps": 195, "epoch": 2.9904761904761905, "percentage": 100.0, "elapsed_time": "1:02:26", "remaining_time": "0:00:00", "throughput": 2145.93, "total_tokens": 8039616}
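Each entry in trainer_log.jsonl is a self-contained JSON object, so the loss curve (the uploaded training_loss.png, presumably produced because plot_loss is enabled in training_args.yaml) can be re-created in a few lines. The sketch below is illustrative only: it assumes the file sits in the current directory and skips the final entry, which carries no loss value.

```python
# Sketch: re-plot the training loss from trainer_log.jsonl (illustrative only).
import json
import matplotlib.pyplot as plt

steps, losses = [], []
with open("trainer_log.jsonl") as f:
    for line in f:
        record = json.loads(line)
        if "loss" in record:          # the last record is a summary without a loss
            steps.append(record["current_steps"])
            losses.append(record["loss"])

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.savefig("training_loss_replot.png")
```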
trainer_state.json ADDED
@@ -0,0 +1,355 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.9904761904761905,
+ "eval_steps": 500,
+ "global_step": 195,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0761904761904762,
+ "grad_norm": 0.25375691056251526,
+ "learning_rate": 4.9918932703355256e-05,
+ "loss": 0.2783,
+ "num_input_tokens_seen": 211280,
+ "step": 5
+ },
+ {
+ "epoch": 0.1523809523809524,
+ "grad_norm": 0.1986791342496872,
+ "learning_rate": 4.967625656594782e-05,
+ "loss": 0.1989,
+ "num_input_tokens_seen": 419136,
+ "step": 10
+ },
+ {
+ "epoch": 0.22857142857142856,
+ "grad_norm": 0.1764397770166397,
+ "learning_rate": 4.92735454356513e-05,
+ "loss": 0.1331,
+ "num_input_tokens_seen": 631632,
+ "step": 15
+ },
+ {
+ "epoch": 0.3047619047619048,
+ "grad_norm": 0.14602281153202057,
+ "learning_rate": 4.8713411048678635e-05,
+ "loss": 0.1029,
+ "num_input_tokens_seen": 836896,
+ "step": 20
+ },
+ {
+ "epoch": 0.38095238095238093,
+ "grad_norm": 0.14543306827545166,
+ "learning_rate": 4.799948609147061e-05,
+ "loss": 0.08,
+ "num_input_tokens_seen": 1042416,
+ "step": 25
+ },
+ {
+ "epoch": 0.45714285714285713,
+ "grad_norm": 0.14905446767807007,
+ "learning_rate": 4.713640064133025e-05,
+ "loss": 0.0767,
+ "num_input_tokens_seen": 1244384,
+ "step": 30
+ },
+ {
+ "epoch": 0.5333333333333333,
+ "grad_norm": 0.12417428195476532,
+ "learning_rate": 4.6129752138594874e-05,
+ "loss": 0.0596,
+ "num_input_tokens_seen": 1445168,
+ "step": 35
+ },
+ {
+ "epoch": 0.6095238095238096,
+ "grad_norm": 0.1181894913315773,
+ "learning_rate": 4.498606908508754e-05,
+ "loss": 0.0619,
+ "num_input_tokens_seen": 1651760,
+ "step": 40
+ },
+ {
+ "epoch": 0.6857142857142857,
+ "grad_norm": 0.13109543919563293,
+ "learning_rate": 4.371276870427753e-05,
+ "loss": 0.0585,
+ "num_input_tokens_seen": 1849920,
+ "step": 45
+ },
+ {
+ "epoch": 0.7619047619047619,
+ "grad_norm": 0.1899598389863968,
+ "learning_rate": 4.231810883773999e-05,
+ "loss": 0.0642,
+ "num_input_tokens_seen": 2056592,
+ "step": 50
+ },
+ {
+ "epoch": 0.8380952380952381,
+ "grad_norm": 0.11073227226734161,
+ "learning_rate": 4.0811134389884433e-05,
+ "loss": 0.053,
+ "num_input_tokens_seen": 2265056,
+ "step": 55
+ },
+ {
+ "epoch": 0.9142857142857143,
+ "grad_norm": 0.11537300050258636,
+ "learning_rate": 3.920161866827889e-05,
+ "loss": 0.0566,
+ "num_input_tokens_seen": 2471328,
+ "step": 60
+ },
+ {
+ "epoch": 0.9904761904761905,
+ "grad_norm": 0.1397155374288559,
+ "learning_rate": 3.7500000000000003e-05,
+ "loss": 0.0496,
+ "num_input_tokens_seen": 2676640,
+ "step": 65
+ },
+ {
+ "epoch": 1.0761904761904761,
+ "grad_norm": 0.1408267617225647,
+ "learning_rate": 3.5717314035076355e-05,
+ "loss": 0.0577,
+ "num_input_tokens_seen": 2897712,
+ "step": 70
+ },
+ {
+ "epoch": 1.1523809523809523,
+ "grad_norm": 0.11563374847173691,
+ "learning_rate": 3.386512217606339e-05,
+ "loss": 0.0442,
+ "num_input_tokens_seen": 3089808,
+ "step": 75
+ },
+ {
+ "epoch": 1.2285714285714286,
+ "grad_norm": 0.1368333101272583,
+ "learning_rate": 3.195543659791132e-05,
+ "loss": 0.0469,
+ "num_input_tokens_seen": 3296576,
+ "step": 80
+ },
+ {
+ "epoch": 1.3047619047619048,
+ "grad_norm": 0.11226939409971237,
+ "learning_rate": 3.0000642344401113e-05,
+ "loss": 0.0417,
+ "num_input_tokens_seen": 3508416,
+ "step": 85
+ },
+ {
+ "epoch": 1.380952380952381,
+ "grad_norm": 0.12701162695884705,
+ "learning_rate": 2.8013417006383076e-05,
+ "loss": 0.0451,
+ "num_input_tokens_seen": 3706512,
+ "step": 90
+ },
+ {
+ "epoch": 1.457142857142857,
+ "grad_norm": 0.11398270726203918,
+ "learning_rate": 2.600664850273538e-05,
+ "loss": 0.0438,
+ "num_input_tokens_seen": 3913120,
+ "step": 95
+ },
+ {
+ "epoch": 1.5333333333333332,
+ "grad_norm": 0.15226365625858307,
+ "learning_rate": 2.399335149726463e-05,
+ "loss": 0.038,
+ "num_input_tokens_seen": 4121248,
+ "step": 100
+ },
+ {
+ "epoch": 1.6095238095238096,
+ "grad_norm": 0.12406383454799652,
+ "learning_rate": 2.1986582993616926e-05,
+ "loss": 0.038,
+ "num_input_tokens_seen": 4327824,
+ "step": 105
+ },
+ {
+ "epoch": 1.6857142857142857,
+ "grad_norm": 0.11916245520114899,
+ "learning_rate": 1.9999357655598893e-05,
+ "loss": 0.0397,
+ "num_input_tokens_seen": 4537936,
+ "step": 110
+ },
+ {
+ "epoch": 1.7619047619047619,
+ "grad_norm": 0.14286760985851288,
+ "learning_rate": 1.8044563402088684e-05,
+ "loss": 0.0404,
+ "num_input_tokens_seen": 4744432,
+ "step": 115
+ },
+ {
+ "epoch": 1.8380952380952382,
+ "grad_norm": 0.09759990125894547,
+ "learning_rate": 1.613487782393661e-05,
+ "loss": 0.0362,
+ "num_input_tokens_seen": 4955296,
+ "step": 120
+ },
+ {
+ "epoch": 1.9142857142857141,
+ "grad_norm": 0.13581913709640503,
+ "learning_rate": 1.4282685964923642e-05,
+ "loss": 0.0397,
+ "num_input_tokens_seen": 5154784,
+ "step": 125
+ },
+ {
+ "epoch": 1.9904761904761905,
+ "grad_norm": 0.12309057265520096,
+ "learning_rate": 1.2500000000000006e-05,
+ "loss": 0.0343,
+ "num_input_tokens_seen": 5358160,
+ "step": 130
+ },
+ {
+ "epoch": 2.0761904761904764,
+ "grad_norm": 0.11022833734750748,
+ "learning_rate": 1.0798381331721109e-05,
+ "loss": 0.0396,
+ "num_input_tokens_seen": 5577808,
+ "step": 135
+ },
+ {
+ "epoch": 2.1523809523809523,
+ "grad_norm": 0.10238117724657059,
+ "learning_rate": 9.18886561011557e-06,
+ "loss": 0.0394,
+ "num_input_tokens_seen": 5775552,
+ "step": 140
+ },
+ {
+ "epoch": 2.2285714285714286,
+ "grad_norm": 0.09375017136335373,
+ "learning_rate": 7.681891162260015e-06,
+ "loss": 0.0311,
+ "num_input_tokens_seen": 5986112,
+ "step": 145
+ },
+ {
+ "epoch": 2.3047619047619046,
+ "grad_norm": 0.13229627907276154,
+ "learning_rate": 6.28723129572247e-06,
+ "loss": 0.048,
+ "num_input_tokens_seen": 6185568,
+ "step": 150
+ },
+ {
+ "epoch": 2.380952380952381,
+ "grad_norm": 0.1364370435476303,
+ "learning_rate": 5.013930914912476e-06,
+ "loss": 0.0374,
+ "num_input_tokens_seen": 6389392,
+ "step": 155
+ },
+ {
+ "epoch": 2.4571428571428573,
+ "grad_norm": 0.11261958628892899,
+ "learning_rate": 3.8702478614051355e-06,
+ "loss": 0.0334,
+ "num_input_tokens_seen": 6594256,
+ "step": 160
+ },
+ {
+ "epoch": 2.533333333333333,
+ "grad_norm": 0.09721696376800537,
+ "learning_rate": 2.8635993586697553e-06,
+ "loss": 0.0324,
+ "num_input_tokens_seen": 6802240,
+ "step": 165
+ },
+ {
+ "epoch": 2.6095238095238096,
+ "grad_norm": 0.11983498930931091,
+ "learning_rate": 2.0005139085293945e-06,
+ "loss": 0.0342,
+ "num_input_tokens_seen": 7004816,
+ "step": 170
+ },
+ {
+ "epoch": 2.685714285714286,
+ "grad_norm": 0.13199082016944885,
+ "learning_rate": 1.286588951321363e-06,
+ "loss": 0.0384,
+ "num_input_tokens_seen": 7208608,
+ "step": 175
+ },
+ {
+ "epoch": 2.761904761904762,
+ "grad_norm": 0.10022356361150742,
+ "learning_rate": 7.264545643486997e-07,
+ "loss": 0.0357,
+ "num_input_tokens_seen": 7418320,
+ "step": 180
+ },
+ {
+ "epoch": 2.8380952380952382,
+ "grad_norm": 0.11056291311979294,
+ "learning_rate": 3.237434340521789e-07,
+ "loss": 0.0351,
+ "num_input_tokens_seen": 7633248,
+ "step": 185
+ },
+ {
+ "epoch": 2.914285714285714,
+ "grad_norm": 0.12205488979816437,
+ "learning_rate": 8.106729664475176e-08,
+ "loss": 0.0324,
+ "num_input_tokens_seen": 7838576,
+ "step": 190
+ },
+ {
+ "epoch": 2.9904761904761905,
+ "grad_norm": 0.10819905996322632,
+ "learning_rate": 0.0,
+ "loss": 0.0357,
+ "num_input_tokens_seen": 8039616,
+ "step": 195
+ },
+ {
+ "epoch": 2.9904761904761905,
+ "num_input_tokens_seen": 8039616,
+ "step": 195,
+ "total_flos": 3.746411207808123e+17,
+ "train_loss": 0.05875852833955716,
+ "train_runtime": 3746.4527,
+ "train_samples_per_second": 0.841,
+ "train_steps_per_second": 0.052
+ }
+ ],
+ "logging_steps": 5,
+ "max_steps": 195,
+ "num_input_tokens_seen": 8039616,
+ "num_train_epochs": 3,
+ "save_steps": 100,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 3.746411207808123e+17,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+ }
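The step count in trainer_state.json is consistent with the batch settings: with a per-device batch size of 2 and 8 gradient-accumulation steps, each optimizer step consumes 16 samples, so 65 steps per epoch (195 / 3) implies at least 1,040 training examples, and the epoch fraction logged after 5 steps (0.0761904…) narrows this to about 1,050. A minimal sketch of that arithmetic, assuming a single GPU as implied by the total train batch size of 16:

```python
# Sketch: back out the approximate dataset size from the trainer state.
per_device_train_batch_size = 2          # from training_args.yaml
gradient_accumulation_steps = 8
effective_batch = per_device_train_batch_size * gradient_accumulation_steps  # 16

# 195 optimizer steps over 3 epochs -> 65 steps per epoch -> at least 1040 examples.
print(195 // 3 * effective_batch)                          # 1040 (lower bound)

# The logged epoch fraction after 5 steps pins it down more precisely.
epoch_after_5_steps = 0.0761904761904762
print(round(effective_batch * 5 / epoch_after_5_steps))    # ~1050 examples
```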
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:228b1695b1c7eda19f24070b43c6f3492403b0468d8e1d5e3d62b239049e3e70
+ size 5688
training_args.yaml ADDED
@@ -0,0 +1,33 @@
+ bf16: true
+ cutoff_len: 34114
+ dataset: BsKIE3
+ dataset_dir: data
+ ddp_timeout: 180000000
+ do_train: true
+ finetuning_type: lora
+ flash_attn: auto
+ gradient_accumulation_steps: 8
+ include_num_input_tokens_seen: true
+ learning_rate: 5.0e-05
+ logging_steps: 5
+ lora_alpha: 16
+ lora_dropout: 0
+ lora_rank: 8
+ lora_target: all
+ lr_scheduler_type: cosine
+ max_grad_norm: 1.0
+ max_samples: 100000
+ model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
+ num_train_epochs: 3.0
+ optim: adamw_torch
+ output_dir: saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora
+ packing: false
+ per_device_train_batch_size: 2
+ plot_loss: true
+ preprocessing_num_workers: 16
+ report_to: none
+ save_steps: 100
+ stage: sft
+ template: qwen2_vl
+ trust_remote_code: true
+ warmup_steps: 0
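training_args.yaml is a LLaMA-Factory SFT recipe (typically launched with something like `llamafactory-cli train training_args.yaml`); once training has produced the adapter under the output_dir above, it can be attached to the base checkpoint with PEFT. The snippet below is a minimal sketch, not part of the repository: the adapter path is the local output_dir taken from the YAML, and the dtype and device_map choices are illustrative.

```python
# Sketch: load the base Qwen2-VL model and attach the trained LoRA adapter.
import torch
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from peft import PeftModel

base = Qwen2VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2-VL-7B-Instruct",
    torch_dtype=torch.bfloat16,   # training ran in bf16
    device_map="auto",
)
processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct")

# Adapter path assumed from output_dir in training_args.yaml above.
model = PeftModel.from_pretrained(base, "saves/Qwen2-VL-7B-Instruct/lora/BS_riche_lora")
model.eval()
```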
training_loss.png ADDED
vocab.json ADDED
The diff for this file is too large to render. See raw diff