htlou committed b130df9 (verified) · Parent(s): fe77add

Upload folder using huggingface_hub
README.md ADDED
@@ -0,0 +1,83 @@
+ ---
+ library_name: transformers
+ license: other
+ base_model: llava-hf/llava-v1.6-mistral-7b-hf
+ tags:
+ - llama-factory
+ - full
+ - generated_from_trainer
+ model-index:
+ - name: AA_text_image_to_text
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # AA_text_image_to_text
+
+ This model is a fine-tuned version of [llava-hf/llava-v1.6-mistral-7b-hf](https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf) on the AA_text_image_to_text dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.4527
+ - Rewards/chosen: -0.6857
+ - Rewards/rejected: -4.3940
+ - Rewards/accuracies: 0.8165
+ - Rewards/margins: 3.7083
+ - Logps/rejected: -242.1480
+ - Logps/chosen: -207.1762
+ - Logits/rejected: -2.3240
+ - Logits/chosen: -2.3485
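+
+ (Rewards/margins is simply the gap between the chosen and rejected rewards: -0.6857 - (-4.3940) = 3.7083.)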
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-06
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 256
+ - total_eval_batch_size: 64
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_steps: 10
+ - num_epochs: 3.0
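+
+ The aggregate batch sizes above follow directly from the per-device settings; a quick sanity check:
+
+ ```python
+ # effective training batch = per-device batch x number of GPUs x gradient accumulation steps
+ train_batch_size, num_devices, grad_accum_steps = 8, 8, 4
+ assert train_batch_size * num_devices * grad_accum_steps == 256  # total_train_batch_size
+ assert 8 * num_devices == 64  # total_eval_batch_size (no gradient accumulation at evaluation time)
+ ```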
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.4889 | 0.2899 | 40 | 0.4642 | 1.1544 | -0.1887 | 0.7944 | 1.3431 | -200.0950 | -188.7752 | -1.9876 | -2.0351 |
+ | 0.3941 | 0.5797 | 80 | 0.4218 | -0.2275 | -2.2919 | 0.8044 | 2.0644 | -221.1273 | -202.5944 | -1.9449 | -1.9901 |
+ | 0.3717 | 0.8696 | 120 | 0.4387 | -0.2101 | -2.4885 | 0.8286 | 2.2784 | -223.0936 | -202.4208 | -2.0902 | -2.1229 |
+ | 0.1459 | 1.1594 | 160 | 0.4288 | -0.4029 | -3.3928 | 0.8286 | 2.9899 | -232.1363 | -204.3488 | -2.2733 | -2.3007 |
+ | 0.1455 | 1.4493 | 200 | 0.4255 | -0.5338 | -3.6331 | 0.8165 | 3.0992 | -234.5387 | -205.6577 | -2.2466 | -2.2697 |
+ | 0.1358 | 1.7391 | 240 | 0.4247 | -0.2714 | -3.6715 | 0.8327 | 3.4001 | -234.9227 | -203.0333 | -2.3605 | -2.3806 |
+ | 0.0938 | 2.0290 | 280 | 0.4128 | -0.3136 | -3.7007 | 0.8266 | 3.3870 | -235.2147 | -203.4556 | -2.3725 | -2.3933 |
+ | 0.0592 | 2.3188 | 320 | 0.4438 | -0.5767 | -4.1235 | 0.8165 | 3.5467 | -239.4429 | -206.0869 | -2.3109 | -2.3358 |
+ | 0.0673 | 2.6087 | 360 | 0.4553 | -0.6264 | -4.3005 | 0.8206 | 3.6740 | -241.2126 | -206.5837 | -2.3254 | -2.3497 |
+ | 0.0728 | 2.8986 | 400 | 0.4520 | -0.6855 | -4.3942 | 0.8185 | 3.7087 | -242.1503 | -207.1744 | -2.3247 | -2.3492 |
+
+
+ ### Framework versions
+
+ - Transformers 4.45.2
+ - Pytorch 2.4.0+cu121
+ - Datasets 2.21.0
+ - Tokenizers 0.20.3
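+
+ ### Inference example
+
+ A minimal inference sketch, assuming the checkpoint is available under some Hub repo id or local path (`REPO_ID` below is a hypothetical placeholder). It follows the standard LLaVA-NeXT usage for the Mistral-based base model, including its `[INST] ... [/INST]` prompt format and the `<image>` placeholder token (id 32000, as recorded in `added_tokens.json` and `config.json` below):
+
+ ```python
+ import torch
+ from PIL import Image
+ from transformers import LlavaNextForConditionalGeneration, LlavaNextProcessor
+
+ REPO_ID = "path/to/AA_text_image_to_text"  # hypothetical placeholder: replace with the actual repo id or local directory
+
+ processor = LlavaNextProcessor.from_pretrained(REPO_ID)
+ model = LlavaNextForConditionalGeneration.from_pretrained(
+     REPO_ID, torch_dtype=torch.bfloat16, device_map="auto"
+ )
+
+ image = Image.open("example.jpg")
+ # Mistral-instruct style prompt; <image> marks where the image features are spliced in.
+ prompt = "[INST] <image>\nDescribe this image. [/INST]"
+
+ inputs = processor(images=image, text=prompt, return_tensors="pt").to(model.device)
+ output_ids = model.generate(**inputs, max_new_tokens=128)
+ print(processor.decode(output_ids[0], skip_special_tokens=True))
+ ```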
added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "<image>": 32000,
+   "<pad>": 32001
+ }
all_results.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "epoch": 3.0,
+   "eval_logits/chosen": -2.3485426902770996,
+   "eval_logits/rejected": -2.32399582862854,
+   "eval_logps/chosen": -207.17623901367188,
+   "eval_logps/rejected": -242.1480255126953,
+   "eval_loss": 0.45268043875694275,
+   "eval_rewards/accuracies": 0.8165322542190552,
+   "eval_rewards/chosen": -0.6856781244277954,
+   "eval_rewards/margins": 3.7083182334899902,
+   "eval_rewards/rejected": -4.393996715545654,
+   "eval_runtime": 246.3688,
+   "eval_samples_per_second": 15.919,
+   "eval_steps_per_second": 0.252,
+   "total_flos": 4881795388538880.0,
+   "train_loss": 0.21833302825689316,
+   "train_runtime": 16364.2441,
+   "train_samples_per_second": 6.47,
+   "train_steps_per_second": 0.025
+ }
config.json ADDED
@@ -0,0 +1,68 @@
+ {
+   "_name_or_path": "/data/align-anything/hantao/models/llava-v1.6-mistral-7b-hf",
+   "architectures": [
+     "LlavaNextForConditionalGeneration"
+   ],
+   "hidden_size": 4096,
+   "ignore_index": -100,
+   "image_grid_pinpoints": [
+     [
+       336,
+       672
+     ],
+     [
+       672,
+       336
+     ],
+     [
+       672,
+       672
+     ],
+     [
+       1008,
+       336
+     ],
+     [
+       336,
+       1008
+     ]
+   ],
+   "image_seq_length": 576,
+   "image_token_index": 32000,
+   "model_type": "llava_next",
+   "projector_hidden_act": "gelu",
+   "text_config": {
+     "_name_or_path": "mistralai/Mistral-7B-Instruct-v0.2",
+     "architectures": [
+       "MistralForCausalLM"
+     ],
+     "intermediate_size": 14336,
+     "max_position_embeddings": 32768,
+     "model_type": "mistral",
+     "num_key_value_heads": 8,
+     "rms_norm_eps": 1e-05,
+     "rope_theta": 1000000.0,
+     "sliding_window": null,
+     "torch_dtype": "bfloat16",
+     "vocab_size": 32064
+   },
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.45.2",
+   "use_cache": false,
+   "use_image_newline_parameter": true,
+   "vision_config": {
+     "hidden_size": 1024,
+     "image_size": 336,
+     "intermediate_size": 4096,
+     "model_type": "clip_vision_model",
+     "num_attention_heads": 16,
+     "num_hidden_layers": 24,
+     "patch_size": 14,
+     "projection_dim": 768,
+     "vocab_size": 32000
+   },
+   "vision_feature_layer": -2,
+   "vision_feature_select_strategy": "default",
+   "vocab_size": 32064
+ }
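The config above pairs a Mistral-7B-Instruct-v0.2 text backbone with a CLIP ViT-L/14 vision tower at 336 px resolution. A minimal sketch for inspecting it programmatically, assuming the files from this commit sit in a local directory (the name `checkpoint/` is a hypothetical placeholder):

```python
from transformers import AutoConfig

# Load the config.json shown above from a local checkout of this repository.
cfg = AutoConfig.from_pretrained("checkpoint")

print(cfg.model_type)                                               # llava_next
print(cfg.text_config.model_type, cfg.text_config.vocab_size)       # mistral 32064
print(cfg.vision_config.model_type, cfg.vision_config.image_size)   # clip_vision_model 336
print(cfg.image_token_index)                                        # 32000, matches "<image>" in added_tokens.json
```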
eval_results.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "epoch": 3.0,
+   "eval_logits/chosen": -2.3485426902770996,
+   "eval_logits/rejected": -2.32399582862854,
+   "eval_logps/chosen": -207.17623901367188,
+   "eval_logps/rejected": -242.1480255126953,
+   "eval_loss": 0.45268043875694275,
+   "eval_rewards/accuracies": 0.8165322542190552,
+   "eval_rewards/chosen": -0.6856781244277954,
+   "eval_rewards/margins": 3.7083182334899902,
+   "eval_rewards/rejected": -4.393996715545654,
+   "eval_runtime": 246.3688,
+   "eval_samples_per_second": 15.919,
+   "eval_steps_per_second": 0.252
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.45.2"
+ }
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a84e8a9a7ebe4ff4b2015a69a4c46817eb13604ba3e2a4c7407b5402a157c7f7
+ size 4921618624
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d0f2df815b86850138487a57cdef3d98d74a57fb72c086a4ca8ccf6e2a6649a1
+ size 4915917672
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0e55ba4ce5e4e27ef18780b78ac1c19f481fc39b3fe7cfa0235527ea5ce8a8e
+ size 4915917680
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa9afdcd9573f57434542c4cbceefb8973b0e40eac069ef6b92e58d28da96977
+ size 380134008
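The four safetensors entries above are Git LFS pointer files rather than the weights themselves: each records the SHA-256 digest (`oid`) and byte size of the shard that LFS downloads in its place. A small sketch for checking a downloaded shard against its pointer, using only the values listed above:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file through SHA-256 so multi-GB shards need not fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid from the pointer for the last shard shown above.
expected = "fa9afdcd9573f57434542c4cbceefb8973b0e40eac069ef6b92e58d28da96977"
assert sha256_of("model-00004-of-00004.safetensors") == expected
```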
model.safetensors.index.json ADDED
@@ -0,0 +1,694 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 15133495296
4
+ },
5
+ "weight_map": {
6
+ "image_newline": "model-00001-of-00004.safetensors",
7
+ "language_model.lm_head.weight": "model-00004-of-00004.safetensors",
8
+ "language_model.model.embed_tokens.weight": "model-00001-of-00004.safetensors",
9
+ "language_model.model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
10
+ "language_model.model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
11
+ "language_model.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
12
+ "language_model.model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
13
+ "language_model.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
14
+ "language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
15
+ "language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
16
+ "language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
17
+ "language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
18
+ "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
19
+ "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
20
+ "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
21
+ "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
22
+ "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
23
+ "language_model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
24
+ "language_model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
25
+ "language_model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
26
+ "language_model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
27
+ "language_model.model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
28
+ "language_model.model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
29
+ "language_model.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
30
+ "language_model.model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
31
+ "language_model.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
32
+ "language_model.model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
33
+ "language_model.model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
34
+ "language_model.model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
35
+ "language_model.model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
36
+ "language_model.model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
37
+ "language_model.model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
38
+ "language_model.model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
39
+ "language_model.model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
40
+ "language_model.model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
41
+ "language_model.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
42
+ "language_model.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
43
+ "language_model.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
44
+ "language_model.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
45
+ "language_model.model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
46
+ "language_model.model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
47
+ "language_model.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
48
+ "language_model.model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
49
+ "language_model.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
50
+ "language_model.model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
51
+ "language_model.model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
52
+ "language_model.model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
53
+ "language_model.model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
54
+ "language_model.model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
55
+ "language_model.model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
56
+ "language_model.model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
57
+ "language_model.model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
58
+ "language_model.model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
59
+ "language_model.model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
60
+ "language_model.model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
61
+ "language_model.model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
62
+ "language_model.model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
63
+ "language_model.model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
64
+ "language_model.model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
65
+ "language_model.model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
66
+ "language_model.model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
67
+ "language_model.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
68
+ "language_model.model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
69
+ "language_model.model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
70
+ "language_model.model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
71
+ "language_model.model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
72
+ "language_model.model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
73
+ "language_model.model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
74
+ "language_model.model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
75
+ "language_model.model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
76
+ "language_model.model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
77
+ "language_model.model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
78
+ "language_model.model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
79
+ "language_model.model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
80
+ "language_model.model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
81
+ "language_model.model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
82
+ "language_model.model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
83
+ "language_model.model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
84
+ "language_model.model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
85
+ "language_model.model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
86
+ "language_model.model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
87
+ "language_model.model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
88
+ "language_model.model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
89
+ "language_model.model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
90
+ "language_model.model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
91
+ "language_model.model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
92
+ "language_model.model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
93
+ "language_model.model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
94
+ "language_model.model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
95
+ "language_model.model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
96
+ "language_model.model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
97
+ "language_model.model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
98
+ "language_model.model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
99
+ "language_model.model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
100
+ "language_model.model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
101
+ "language_model.model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
102
+ "language_model.model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
103
+ "language_model.model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
104
+ "language_model.model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
105
+ "language_model.model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
106
+ "language_model.model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
107
+ "language_model.model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
108
+ "language_model.model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
109
+ "language_model.model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
110
+ "language_model.model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
111
+ "language_model.model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
112
+ "language_model.model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
113
+ "language_model.model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
114
+ "language_model.model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
115
+ "language_model.model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
116
+ "language_model.model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
117
+ "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
118
+ "language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
119
+ "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
120
+ "language_model.model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
121
+ "language_model.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
122
+ "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
123
+ "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
124
+ "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
125
+ "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
126
+ "language_model.model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
127
+ "language_model.model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
128
+ "language_model.model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
129
+ "language_model.model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
130
+ "language_model.model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
131
+ "language_model.model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
132
+ "language_model.model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
133
+ "language_model.model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
134
+ "language_model.model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
135
+ "language_model.model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
136
+ "language_model.model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
137
+ "language_model.model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
138
+ "language_model.model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
139
+ "language_model.model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
140
+ "language_model.model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
141
+ "language_model.model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
142
+ "language_model.model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
143
+ "language_model.model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
144
+ "language_model.model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
145
+ "language_model.model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
146
+ "language_model.model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
147
+ "language_model.model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
148
+ "language_model.model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
149
+ "language_model.model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
150
+ "language_model.model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
151
+ "language_model.model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
152
+ "language_model.model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
153
+ "language_model.model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
154
+ "language_model.model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
155
+ "language_model.model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
156
+ "language_model.model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
157
+ "language_model.model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
158
+ "language_model.model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
159
+ "language_model.model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
160
+ "language_model.model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
161
+ "language_model.model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
162
+ "language_model.model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
163
+ "language_model.model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
164
+ "language_model.model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
165
+ "language_model.model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
166
+ "language_model.model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
167
+ "language_model.model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
168
+ "language_model.model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
169
+ "language_model.model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
170
+ "language_model.model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
171
+ "language_model.model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
172
+ "language_model.model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
173
+ "language_model.model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
174
+ "language_model.model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
175
+ "language_model.model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
176
+ "language_model.model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
177
+ "language_model.model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
178
+ "language_model.model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
179
+ "language_model.model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
180
+ "language_model.model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
181
+ "language_model.model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
182
+ "language_model.model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
183
+ "language_model.model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
184
+ "language_model.model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
185
+ "language_model.model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
186
+ "language_model.model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
187
+ "language_model.model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
188
+ "language_model.model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
189
+ "language_model.model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
190
+ "language_model.model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
191
+ "language_model.model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
192
+ "language_model.model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
193
+ "language_model.model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
194
+ "language_model.model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
195
+ "language_model.model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
196
+ "language_model.model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
197
+ "language_model.model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
198
+ "language_model.model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
199
+ "language_model.model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
200
+ "language_model.model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
201
+ "language_model.model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
202
+ "language_model.model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
203
+ "language_model.model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
204
+ "language_model.model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
205
+ "language_model.model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
206
+ "language_model.model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
207
+ "language_model.model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
208
+ "language_model.model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
209
+ "language_model.model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
210
+ "language_model.model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
211
+ "language_model.model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
212
+ "language_model.model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
213
+ "language_model.model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
214
+ "language_model.model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
215
+ "language_model.model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
216
+ "language_model.model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
217
+ "language_model.model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
218
+ "language_model.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
219
+ "language_model.model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
220
+ "language_model.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
221
+ "language_model.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
222
+ "language_model.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
223
+ "language_model.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
224
+ "language_model.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
225
+ "language_model.model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
226
+ "language_model.model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
227
+ "language_model.model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
228
+ "language_model.model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
229
+ "language_model.model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
230
+ "language_model.model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
231
+ "language_model.model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
232
+ "language_model.model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
233
+ "language_model.model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
234
+ "language_model.model.layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors",
235
+ "language_model.model.layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
236
+ "language_model.model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
237
+ "language_model.model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
238
+ "language_model.model.layers.31.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
239
+ "language_model.model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
240
+ "language_model.model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
241
+ "language_model.model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
242
+ "language_model.model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
243
+ "language_model.model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
244
+ "language_model.model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
245
+ "language_model.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
246
+ "language_model.model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
247
+ "language_model.model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
248
+ "language_model.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
249
+ "language_model.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
250
+ "language_model.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
251
+ "language_model.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
252
+ "language_model.model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
253
+ "language_model.model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
254
+ "language_model.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
255
+ "language_model.model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
256
+ "language_model.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
257
+ "language_model.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
258
+ "language_model.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
259
+ "language_model.model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
260
+ "language_model.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
261
+ "language_model.model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
262
+ "language_model.model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
263
+ "language_model.model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
264
+ "language_model.model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
265
+ "language_model.model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
266
+ "language_model.model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
267
+ "language_model.model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
268
+ "language_model.model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
269
+ "language_model.model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
270
+ "language_model.model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
271
+ "language_model.model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
272
+ "language_model.model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
273
+ "language_model.model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
274
+ "language_model.model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
275
+ "language_model.model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
276
+ "language_model.model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
277
+ "language_model.model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
278
+ "language_model.model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
279
+ "language_model.model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
280
+ "language_model.model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
281
+ "language_model.model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
282
+ "language_model.model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
283
+ "language_model.model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
284
+ "language_model.model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
285
+ "language_model.model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
286
+ "language_model.model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
287
+ "language_model.model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
288
+ "language_model.model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
289
+ "language_model.model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
290
+ "language_model.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
291
+ "language_model.model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
292
+ "language_model.model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
293
+ "language_model.model.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
294
+ "language_model.model.layers.9.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
295
+ "language_model.model.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
296
+ "language_model.model.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
297
+ "language_model.model.norm.weight": "model-00004-of-00004.safetensors",
298
+ "multi_modal_projector.linear_1.bias": "model-00001-of-00004.safetensors",
299
+ "multi_modal_projector.linear_1.weight": "model-00001-of-00004.safetensors",
300
+ "multi_modal_projector.linear_2.bias": "model-00001-of-00004.safetensors",
301
+ "multi_modal_projector.linear_2.weight": "model-00001-of-00004.safetensors",
302
+ "vision_tower.vision_model.embeddings.class_embedding": "model-00001-of-00004.safetensors",
303
+ "vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00001-of-00004.safetensors",
304
+ "vision_tower.vision_model.embeddings.position_embedding.weight": "model-00001-of-00004.safetensors",
305
+ "vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00001-of-00004.safetensors",
306
+ "vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00001-of-00004.safetensors",
307
+ "vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00001-of-00004.safetensors",
308
+ "vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00001-of-00004.safetensors",
309
+ "vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00001-of-00004.safetensors",
310
+ "vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00001-of-00004.safetensors",
311
+ "vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00001-of-00004.safetensors",
312
+ "vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00001-of-00004.safetensors",
313
+ "vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
314
+ "vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
315
+ "vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
316
+ "vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
317
+ "vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
318
+ "vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
319
+ "vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
320
+ "vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
321
+ "vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "model-00001-of-00004.safetensors",
322
+ "vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "model-00001-of-00004.safetensors",
323
+ "vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "model-00001-of-00004.safetensors",
324
+ "vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "model-00001-of-00004.safetensors",
325
+ "vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00001-of-00004.safetensors",
326
+ "vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00001-of-00004.safetensors",
327
+ "vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00001-of-00004.safetensors",
328
+ "vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00001-of-00004.safetensors",
329
+ "vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
330
+ "vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
331
+ "vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
332
+ "vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
333
+ "vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
334
+ "vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
335
+ "vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
336
+ "vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
337
+ "vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "model-00001-of-00004.safetensors",
338
+ "vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "model-00001-of-00004.safetensors",
339
+ "vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "model-00001-of-00004.safetensors",
340
+ "vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "model-00001-of-00004.safetensors",
341
+ "vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00001-of-00004.safetensors",
342
+ "vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00001-of-00004.safetensors",
343
+ "vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00001-of-00004.safetensors",
344
+ "vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00001-of-00004.safetensors",
345
+ "vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
346
+ "vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
347
+ "vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
348
+ "vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
349
+ "vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
350
+ "vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
351
+ "vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
352
+ "vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
353
+ "vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "model-00001-of-00004.safetensors",
354
+ "vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "model-00001-of-00004.safetensors",
355
+ "vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "model-00001-of-00004.safetensors",
356
+ "vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "model-00001-of-00004.safetensors",
357
+ "vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00001-of-00004.safetensors",
358
+ "vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00001-of-00004.safetensors",
359
+ "vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00001-of-00004.safetensors",
360
+ "vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00001-of-00004.safetensors",
361
+ "vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
362
+ "vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
363
+ "vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
364
+ "vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
365
+ "vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
366
+ "vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
367
+ "vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
368
+ "vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
369
+ "vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "model-00001-of-00004.safetensors",
370
+ "vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "model-00001-of-00004.safetensors",
371
+ "vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "model-00001-of-00004.safetensors",
372
+ "vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "model-00001-of-00004.safetensors",
373
+ "vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00001-of-00004.safetensors",
374
+ "vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00001-of-00004.safetensors",
375
+ "vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00001-of-00004.safetensors",
376
+ "vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00001-of-00004.safetensors",
377
+ "vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
378
+ "vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
379
+ "vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
380
+ "vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
381
+ "vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
382
+ "vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
383
+ "vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
384
+ "vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
385
+ "vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "model-00001-of-00004.safetensors",
386
+ "vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "model-00001-of-00004.safetensors",
387
+ "vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "model-00001-of-00004.safetensors",
388
+ "vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "model-00001-of-00004.safetensors",
389
+ "vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00001-of-00004.safetensors",
390
+ "vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00001-of-00004.safetensors",
391
+ "vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00001-of-00004.safetensors",
392
+ "vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00001-of-00004.safetensors",
393
+ "vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
394
+ "vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
395
+ "vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
396
+ "vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
397
+ "vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
398
+ "vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
399
+ "vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
400
+ "vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
401
+ "vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "model-00001-of-00004.safetensors",
402
+ "vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "model-00001-of-00004.safetensors",
403
+ "vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "model-00001-of-00004.safetensors",
404
+ "vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "model-00001-of-00004.safetensors",
405
+ "vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00001-of-00004.safetensors",
406
+ "vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00001-of-00004.safetensors",
407
+ "vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00001-of-00004.safetensors",
408
+ "vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00001-of-00004.safetensors",
409
+ "vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
410
+ "vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
411
+ "vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
412
+ "vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
413
+ "vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
414
+ "vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
415
+ "vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
416
+ "vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
417
+ "vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "model-00001-of-00004.safetensors",
418
+ "vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "model-00001-of-00004.safetensors",
419
+ "vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "model-00001-of-00004.safetensors",
420
+ "vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "model-00001-of-00004.safetensors",
421
+ "vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00001-of-00004.safetensors",
422
+ "vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00001-of-00004.safetensors",
423
+ "vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00001-of-00004.safetensors",
424
+ "vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00001-of-00004.safetensors",
425
+ "vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
426
+ "vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
427
+ "vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
428
+ "vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
429
+ "vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
430
+ "vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
431
+ "vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
432
+ "vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
433
+ "vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "model-00001-of-00004.safetensors",
434
+ "vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "model-00001-of-00004.safetensors",
435
+ "vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "model-00001-of-00004.safetensors",
436
+ "vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "model-00001-of-00004.safetensors",
437
+ "vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00001-of-00004.safetensors",
438
+ "vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00001-of-00004.safetensors",
439
+ "vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00001-of-00004.safetensors",
440
+ "vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00001-of-00004.safetensors",
441
+ "vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
442
+ "vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
443
+ "vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
444
+ "vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
445
+ "vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
446
+ "vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
447
+ "vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
448
+ "vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
449
+ "vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00001-of-00004.safetensors",
450
+ "vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00001-of-00004.safetensors",
451
+ "vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00001-of-00004.safetensors",
452
+ "vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00001-of-00004.safetensors",
453
+ "vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00001-of-00004.safetensors",
454
+ "vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00001-of-00004.safetensors",
455
+ "vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00001-of-00004.safetensors",
456
+ "vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00001-of-00004.safetensors",
457
+ "vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
458
+ "vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
459
+ "vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
460
+ "vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
461
+ "vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
462
+ "vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
463
+ "vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
464
+ "vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
465
+ "vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00001-of-00004.safetensors",
466
+ "vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00001-of-00004.safetensors",
467
+ "vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00001-of-00004.safetensors",
468
+ "vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00001-of-00004.safetensors",
469
+ "vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00001-of-00004.safetensors",
470
+ "vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00001-of-00004.safetensors",
471
+ "vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00001-of-00004.safetensors",
472
+ "vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00001-of-00004.safetensors",
473
+ "vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
474
+ "vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
475
+ "vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
476
+ "vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
477
+ "vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
478
+ "vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
479
+ "vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
480
+ "vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
481
+ "vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00001-of-00004.safetensors",
482
+ "vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00001-of-00004.safetensors",
483
+ "vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00001-of-00004.safetensors",
484
+ "vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00001-of-00004.safetensors",
485
+ "vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00001-of-00004.safetensors",
486
+ "vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00001-of-00004.safetensors",
487
+ "vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00001-of-00004.safetensors",
488
+ "vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00001-of-00004.safetensors",
489
+ "vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
490
+ "vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
491
+ "vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
492
+ "vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
493
+ "vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
494
+ "vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
495
+ "vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
496
+ "vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
497
+ "vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00001-of-00004.safetensors",
498
+ "vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00001-of-00004.safetensors",
499
+ "vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00001-of-00004.safetensors",
500
+ "vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00001-of-00004.safetensors",
501
+ "vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00001-of-00004.safetensors",
502
+ "vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00001-of-00004.safetensors",
503
+ "vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00001-of-00004.safetensors",
504
+ "vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00001-of-00004.safetensors",
505
+ "vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
506
+ "vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
507
+ "vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
508
+ "vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
509
+ "vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
510
+ "vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
511
+ "vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
512
+ "vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
513
+ "vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00001-of-00004.safetensors",
514
+ "vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00001-of-00004.safetensors",
515
+ "vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00001-of-00004.safetensors",
516
+ "vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00001-of-00004.safetensors",
517
+ "vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00001-of-00004.safetensors",
518
+ "vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00001-of-00004.safetensors",
519
+ "vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00001-of-00004.safetensors",
520
+ "vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00001-of-00004.safetensors",
521
+ "vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
522
+ "vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
523
+ "vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
524
+ "vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
525
+ "vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
526
+ "vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
527
+ "vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
528
+ "vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
529
+ "vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00001-of-00004.safetensors",
530
+ "vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00001-of-00004.safetensors",
531
+ "vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00001-of-00004.safetensors",
532
+ "vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00001-of-00004.safetensors",
533
+ "vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00001-of-00004.safetensors",
534
+ "vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00001-of-00004.safetensors",
535
+ "vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00001-of-00004.safetensors",
536
+ "vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00001-of-00004.safetensors",
537
+ "vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
538
+ "vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
539
+ "vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
540
+ "vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
541
+ "vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
542
+ "vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
543
+ "vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
544
+ "vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
545
+ "vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00001-of-00004.safetensors",
546
+ "vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00001-of-00004.safetensors",
547
+ "vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00001-of-00004.safetensors",
548
+ "vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00001-of-00004.safetensors",
549
+ "vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00001-of-00004.safetensors",
550
+ "vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00001-of-00004.safetensors",
551
+ "vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00001-of-00004.safetensors",
552
+ "vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00001-of-00004.safetensors",
553
+ "vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
554
+ "vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
555
+ "vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
556
+ "vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
557
+ "vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
558
+ "vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
559
+ "vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
560
+ "vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
561
+ "vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00001-of-00004.safetensors",
562
+ "vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00001-of-00004.safetensors",
563
+ "vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00001-of-00004.safetensors",
564
+ "vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00001-of-00004.safetensors",
565
+ "vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00001-of-00004.safetensors",
566
+ "vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00001-of-00004.safetensors",
567
+ "vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00001-of-00004.safetensors",
568
+ "vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00001-of-00004.safetensors",
569
+ "vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
570
+ "vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
571
+ "vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
572
+ "vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
573
+ "vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
574
+ "vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
575
+ "vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
576
+ "vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
577
+ "vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00001-of-00004.safetensors",
578
+ "vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00001-of-00004.safetensors",
579
+ "vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00001-of-00004.safetensors",
580
+ "vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00001-of-00004.safetensors",
581
+ "vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00001-of-00004.safetensors",
582
+ "vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00001-of-00004.safetensors",
583
+ "vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00001-of-00004.safetensors",
584
+ "vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00001-of-00004.safetensors",
585
+ "vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
586
+ "vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
587
+ "vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
588
+ "vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
589
+ "vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
590
+ "vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
591
+ "vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
592
+ "vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
593
+ "vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00001-of-00004.safetensors",
594
+ "vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00001-of-00004.safetensors",
595
+ "vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "model-00001-of-00004.safetensors",
596
+ "vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00001-of-00004.safetensors",
597
+ "vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00001-of-00004.safetensors",
598
+ "vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00001-of-00004.safetensors",
599
+ "vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00001-of-00004.safetensors",
600
+ "vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00001-of-00004.safetensors",
601
+ "vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
602
+ "vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
603
+ "vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
604
+ "vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
605
+ "vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
606
+ "vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
607
+ "vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
608
+ "vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
609
+ "vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00001-of-00004.safetensors",
610
+ "vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00001-of-00004.safetensors",
611
+ "vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00001-of-00004.safetensors",
612
+ "vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00001-of-00004.safetensors",
613
+ "vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00001-of-00004.safetensors",
614
+ "vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00001-of-00004.safetensors",
615
+ "vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00001-of-00004.safetensors",
616
+ "vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00001-of-00004.safetensors",
617
+ "vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
618
+ "vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
619
+ "vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
620
+ "vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
621
+ "vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
622
+ "vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
623
+ "vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
624
+ "vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
625
+ "vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00001-of-00004.safetensors",
626
+ "vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00001-of-00004.safetensors",
627
+ "vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00001-of-00004.safetensors",
628
+ "vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00001-of-00004.safetensors",
629
+ "vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00001-of-00004.safetensors",
630
+ "vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00001-of-00004.safetensors",
631
+ "vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00001-of-00004.safetensors",
632
+ "vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00001-of-00004.safetensors",
633
+ "vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
634
+ "vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
635
+ "vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
636
+ "vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
637
+ "vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
638
+ "vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
639
+ "vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
640
+ "vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
641
+ "vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00001-of-00004.safetensors",
642
+ "vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00001-of-00004.safetensors",
643
+ "vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00001-of-00004.safetensors",
644
+ "vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00001-of-00004.safetensors",
645
+ "vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00001-of-00004.safetensors",
646
+ "vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00001-of-00004.safetensors",
647
+ "vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00001-of-00004.safetensors",
648
+ "vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00001-of-00004.safetensors",
649
+ "vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
650
+ "vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
651
+ "vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
652
+ "vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
653
+ "vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
654
+ "vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
655
+ "vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
656
+ "vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
657
+ "vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00001-of-00004.safetensors",
658
+ "vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00001-of-00004.safetensors",
659
+ "vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00001-of-00004.safetensors",
660
+ "vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00001-of-00004.safetensors",
661
+ "vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00001-of-00004.safetensors",
662
+ "vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00001-of-00004.safetensors",
663
+ "vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00001-of-00004.safetensors",
664
+ "vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00001-of-00004.safetensors",
665
+ "vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
666
+ "vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
667
+ "vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
668
+ "vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
669
+ "vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
670
+ "vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
671
+ "vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
672
+ "vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
673
+ "vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00001-of-00004.safetensors",
674
+ "vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00001-of-00004.safetensors",
675
+ "vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00001-of-00004.safetensors",
676
+ "vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00001-of-00004.safetensors",
677
+ "vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00001-of-00004.safetensors",
678
+ "vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00001-of-00004.safetensors",
679
+ "vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00001-of-00004.safetensors",
680
+ "vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00001-of-00004.safetensors",
681
+ "vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
682
+ "vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
683
+ "vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
684
+ "vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
685
+ "vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
686
+ "vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
687
+ "vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
688
+ "vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
689
+ "vision_tower.vision_model.post_layernorm.bias": "model-00001-of-00004.safetensors",
690
+ "vision_tower.vision_model.post_layernorm.weight": "model-00001-of-00004.safetensors",
691
+ "vision_tower.vision_model.pre_layrnorm.bias": "model-00001-of-00004.safetensors",
692
+ "vision_tower.vision_model.pre_layrnorm.weight": "model-00001-of-00004.safetensors"
693
+ }
694
+ }
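The map above is the standard sharded-checkpoint index: every tensor name points at the shard file that stores it, and the vision tower entries shown here all live in the first of the four shards. A minimal inspection sketch, assuming the index file has been downloaded locally (the path below is a placeholder, not part of this commit):

```python
import json
from collections import Counter

# Placeholder path to the model.safetensors.index.json uploaded in this commit.
with open("model.safetensors.index.json") as f:
    index = json.load(f)

weight_map = index["weight_map"]

# How many tensors each shard holds, and which top-level modules appear.
tensors_per_shard = Counter(weight_map.values())
top_level_modules = Counter(name.split(".")[0] for name in weight_map)

print(tensors_per_shard)   # e.g. model-00001-of-00004.safetensors -> number of tensors
print(top_level_modules)   # vision_tower plus the other top-level modules in the index
```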
preprocessor_config.json ADDED
@@ -0,0 +1,52 @@
+ {
+ "aspect_ratio_setting": "anyres",
+ "crop_size": {
+ "height": 336,
+ "width": 336
+ },
+ "do_center_crop": true,
+ "do_convert_rgb": true,
+ "do_normalize": true,
+ "do_pad": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_grid_pinpoints": [
+ [
+ 336,
+ 672
+ ],
+ [
+ 672,
+ 336
+ ],
+ [
+ 672,
+ 672
+ ],
+ [
+ 1008,
+ 336
+ ],
+ [
+ 336,
+ 1008
+ ]
+ ],
+ "image_mean": [
+ 0.48145466,
+ 0.4578275,
+ 0.40821073
+ ],
+ "image_processor_type": "LlavaNextImageProcessor",
+ "image_std": [
+ 0.26862954,
+ 0.26130258,
+ 0.27577711
+ ],
+ "processor_class": "LlavaNextProcessor",
+ "resample": 3,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "shortest_edge": 336
+ }
+ }
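This is the stock LLaVA-NeXT `anyres` image-processing setup: CLIP-style normalization on 336-pixel crops plus the grid pinpoints listed above for high-resolution tiling. A minimal usage sketch, assuming the `transformers` LlavaNext classes; the checkpoint path is a placeholder for wherever this repo is stored locally:

```python
from PIL import Image
from transformers import LlavaNextProcessor

# Placeholder: local path (or Hub id) of this checkpoint.
processor = LlavaNextProcessor.from_pretrained("./AA_text_image_to_text")

image = Image.open("example.jpg")
prompt = "[INST] <image>\nDescribe the image. [/INST]"

# pixel_values are tiled according to image_grid_pinpoints above.
inputs = processor(images=image, text=prompt, return_tensors="pt")
print(inputs["pixel_values"].shape, inputs["input_ids"].shape)
```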
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
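`tokenizer.model` is stored through Git LFS, so the diff only shows the pointer (a sha256 oid and a byte size) rather than the SentencePiece model itself. As a small sketch, the resolved file can be checked against that pointer once it has been pulled locally:

```python
import hashlib
import os

path = "tokenizer.model"  # the resolved LFS file, not the pointer text above
expected_oid = "dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055"
expected_size = 493443

digest = hashlib.sha256(open(path, "rb").read()).hexdigest()

assert os.path.getsize(path) == expected_size, "size mismatch with the LFS pointer"
assert digest == expected_oid, "sha256 mismatch with the LFS pointer"
print("tokenizer.model matches its LFS pointer")
```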
tokenizer_config.json ADDED
@@ -0,0 +1,70 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32000": {
+ "content": "<image>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32001": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "<s>",
+ "chat_template": "{{ '<s>' }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in loop_messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '[INST] ' + content + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ content + '</s>' }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "extra_special_tokens": {
+ "image_token": "<image>"
+ },
+ "image_token": "<image>",
+ "legacy": true,
+ "max_length": null,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_to_multiple_of": null,
+ "pad_token": "<pad>",
+ "pad_token_type_id": 0,
+ "padding_side": "right",
+ "processor_class": "LlavaNextProcessor",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+ }
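Together with `special_tokens_map.json` above, this config registers `<image>` (id 32000) and `<pad>` (id 32001) on top of the base Llama tokenizer and ships a Mistral-style `[INST] ... [/INST]` chat template. A minimal rendering sketch, again with a placeholder local path for this repo:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./AA_text_image_to_text")  # placeholder path

messages = [
    {"role": "user", "content": "<image>\nWhat is in this picture?"},
    {"role": "assistant", "content": "A cat sitting on a table."},
]

# Render the conversation with the chat_template defined above (string only, no tokenization).
print(tokenizer.apply_chat_template(messages, tokenize=False))

# The added multimodal and padding tokens resolve to the ids from added_tokens_decoder.
print(tokenizer.convert_tokens_to_ids(["<image>", "<pad>", "<unk>"]))
```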
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 3.0,
+ "total_flos": 4881795388538880.0,
+ "train_loss": 0.21833302825689316,
+ "train_runtime": 16364.2441,
+ "train_samples_per_second": 6.47,
+ "train_steps_per_second": 0.025
+ }
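These aggregates are internally consistent with the 414-step trainer log below: train_runtime times train_samples_per_second gives roughly 105,877 examples seen over 3 epochs, or about 256 samples per optimizer step. A quick sketch of that cross-check, using only numbers from the files in this commit:

```python
# Cross-check of train_results.json against the 414-step trainer_log.jsonl below.
train_runtime = 16364.2441        # seconds
samples_per_second = 6.47
total_steps = 414

samples_seen = train_runtime * samples_per_second
print(round(samples_seen))                # ~105,877 examples over 3 epochs
print(round(samples_seen / total_steps))  # ~256, the implied effective batch size per step
```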
trainer_log.jsonl ADDED
@@ -0,0 +1,93 @@
1
+ {"current_steps": 5, "total_steps": 414, "loss": 0.689, "accuracy": 0.3187499940395355, "learning_rate": 5e-07, "epoch": 0.036231884057971016, "percentage": 1.21, "elapsed_time": "0:02:46", "remaining_time": "3:47:19"}
2
+ {"current_steps": 10, "total_steps": 414, "loss": 0.6275, "accuracy": 0.675000011920929, "learning_rate": 1e-06, "epoch": 0.07246376811594203, "percentage": 2.42, "elapsed_time": "0:05:29", "remaining_time": "3:42:02"}
3
+ {"current_steps": 15, "total_steps": 414, "loss": 0.5851, "accuracy": 0.75, "learning_rate": 9.996221126793764e-07, "epoch": 0.10869565217391304, "percentage": 3.62, "elapsed_time": "0:08:13", "remaining_time": "3:38:51"}
4
+ {"current_steps": 20, "total_steps": 414, "loss": 0.5144, "accuracy": 0.699999988079071, "learning_rate": 9.984890219128145e-07, "epoch": 0.14492753623188406, "percentage": 4.83, "elapsed_time": "0:10:57", "remaining_time": "3:35:53"}
5
+ {"current_steps": 25, "total_steps": 414, "loss": 0.5036, "accuracy": 0.7562500238418579, "learning_rate": 9.966024404228493e-07, "epoch": 0.18115942028985507, "percentage": 6.04, "elapsed_time": "0:13:41", "remaining_time": "3:32:58"}
6
+ {"current_steps": 30, "total_steps": 414, "loss": 0.4987, "accuracy": 0.762499988079071, "learning_rate": 9.939652198703783e-07, "epoch": 0.21739130434782608, "percentage": 7.25, "elapsed_time": "0:16:24", "remaining_time": "3:30:04"}
7
+ {"current_steps": 35, "total_steps": 414, "loss": 0.5091, "accuracy": 0.7250000238418579, "learning_rate": 9.905813465442354e-07, "epoch": 0.2536231884057971, "percentage": 8.45, "elapsed_time": "0:19:08", "remaining_time": "3:27:19"}
8
+ {"current_steps": 40, "total_steps": 414, "loss": 0.4889, "accuracy": 0.7749999761581421, "learning_rate": 9.864559353357187e-07, "epoch": 0.2898550724637681, "percentage": 9.66, "elapsed_time": "0:21:51", "remaining_time": "3:24:23"}
9
+ {"current_steps": 40, "total_steps": 414, "eval_loss": 0.4642000198364258, "epoch": 0.2898550724637681, "percentage": 9.66, "elapsed_time": "0:26:00", "remaining_time": "4:03:08"}
10
+ {"current_steps": 45, "total_steps": 414, "loss": 0.4587, "accuracy": 0.762499988079071, "learning_rate": 9.815952220071804e-07, "epoch": 0.32608695652173914, "percentage": 10.87, "elapsed_time": "0:29:15", "remaining_time": "3:59:51"}
11
+ {"current_steps": 50, "total_steps": 414, "loss": 0.4531, "accuracy": 0.768750011920929, "learning_rate": 9.76006553766365e-07, "epoch": 0.36231884057971014, "percentage": 12.08, "elapsed_time": "0:31:59", "remaining_time": "3:52:50"}
12
+ {"current_steps": 55, "total_steps": 414, "loss": 0.4595, "accuracy": 0.8374999761581421, "learning_rate": 9.696983781607415e-07, "epoch": 0.39855072463768115, "percentage": 13.29, "elapsed_time": "0:34:43", "remaining_time": "3:46:37"}
13
+ {"current_steps": 60, "total_steps": 414, "loss": 0.4276, "accuracy": 0.8187500238418579, "learning_rate": 9.626802303086209e-07, "epoch": 0.43478260869565216, "percentage": 14.49, "elapsed_time": "0:37:26", "remaining_time": "3:40:54"}
14
+ {"current_steps": 65, "total_steps": 414, "loss": 0.4293, "accuracy": 0.7875000238418579, "learning_rate": 9.549627184863528e-07, "epoch": 0.47101449275362317, "percentage": 15.7, "elapsed_time": "0:40:09", "remaining_time": "3:35:38"}
15
+ {"current_steps": 70, "total_steps": 414, "loss": 0.3963, "accuracy": 0.831250011920929, "learning_rate": 9.465575080933957e-07, "epoch": 0.5072463768115942, "percentage": 16.91, "elapsed_time": "0:42:53", "remaining_time": "3:30:49"}
16
+ {"current_steps": 75, "total_steps": 414, "loss": 0.4353, "accuracy": 0.824999988079071, "learning_rate": 9.374773040194878e-07, "epoch": 0.5434782608695652, "percentage": 18.12, "elapsed_time": "0:45:37", "remaining_time": "3:26:13"}
17
+ {"current_steps": 80, "total_steps": 414, "loss": 0.3941, "accuracy": 0.8125, "learning_rate": 9.277358314405818e-07, "epoch": 0.5797101449275363, "percentage": 19.32, "elapsed_time": "0:48:20", "remaining_time": "3:21:51"}
18
+ {"current_steps": 80, "total_steps": 414, "eval_loss": 0.4217630624771118, "epoch": 0.5797101449275363, "percentage": 19.32, "elapsed_time": "0:52:28", "remaining_time": "3:39:03"}
19
+ {"current_steps": 85, "total_steps": 414, "loss": 0.422, "accuracy": 0.831250011920929, "learning_rate": 9.173478150725651e-07, "epoch": 0.6159420289855072, "percentage": 20.53, "elapsed_time": "0:55:44", "remaining_time": "3:35:44"}
20
+ {"current_steps": 90, "total_steps": 414, "loss": 0.4135, "accuracy": 0.84375, "learning_rate": 9.063289569141251e-07, "epoch": 0.6521739130434783, "percentage": 21.74, "elapsed_time": "0:58:27", "remaining_time": "3:30:25"}
21
+ {"current_steps": 95, "total_steps": 414, "loss": 0.4077, "accuracy": 0.824999988079071, "learning_rate": 8.946959125124051e-07, "epoch": 0.6884057971014492, "percentage": 22.95, "elapsed_time": "1:01:10", "remaining_time": "3:25:26"}
22
+ {"current_steps": 100, "total_steps": 414, "loss": 0.3767, "accuracy": 0.800000011920929, "learning_rate": 8.824662657873238e-07, "epoch": 0.7246376811594203, "percentage": 24.15, "elapsed_time": "1:03:54", "remaining_time": "3:20:40"}
23
+ {"current_steps": 105, "total_steps": 414, "loss": 0.4017, "accuracy": 0.8125, "learning_rate": 8.696585024526135e-07, "epoch": 0.7608695652173914, "percentage": 25.36, "elapsed_time": "1:06:38", "remaining_time": "3:16:05"}
24
+ {"current_steps": 110, "total_steps": 414, "loss": 0.3767, "accuracy": 0.84375, "learning_rate": 8.562919820737535e-07, "epoch": 0.7971014492753623, "percentage": 26.57, "elapsed_time": "1:09:21", "remaining_time": "3:11:40"}
25
+ {"current_steps": 115, "total_steps": 414, "loss": 0.3892, "accuracy": 0.8125, "learning_rate": 8.423869088050315e-07, "epoch": 0.8333333333333334, "percentage": 27.78, "elapsed_time": "1:12:04", "remaining_time": "3:07:22"}
26
+ {"current_steps": 120, "total_steps": 414, "loss": 0.3717, "accuracy": 0.862500011920929, "learning_rate": 8.2796430084997e-07, "epoch": 0.8695652173913043, "percentage": 28.99, "elapsed_time": "1:14:47", "remaining_time": "3:03:14"}
27
+ {"current_steps": 120, "total_steps": 414, "eval_loss": 0.438678115606308, "epoch": 0.8695652173913043, "percentage": 28.99, "elapsed_time": "1:18:55", "remaining_time": "3:13:20"}
28
+ {"current_steps": 125, "total_steps": 414, "loss": 0.4575, "accuracy": 0.800000011920929, "learning_rate": 8.130459586912753e-07, "epoch": 0.9057971014492754, "percentage": 30.19, "elapsed_time": "1:22:09", "remaining_time": "3:09:57"}
29
+ {"current_steps": 130, "total_steps": 414, "loss": 0.388, "accuracy": 0.856249988079071, "learning_rate": 7.97654432138333e-07, "epoch": 0.9420289855072463, "percentage": 31.4, "elapsed_time": "1:24:53", "remaining_time": "3:05:27"}
30
+ {"current_steps": 135, "total_steps": 414, "loss": 0.365, "accuracy": 0.831250011920929, "learning_rate": 7.81812986242061e-07, "epoch": 0.9782608695652174, "percentage": 32.61, "elapsed_time": "1:27:36", "remaining_time": "3:01:03"}
31
+ {"current_steps": 140, "total_steps": 414, "loss": 0.2524, "accuracy": 0.918749988079071, "learning_rate": 7.655455661286375e-07, "epoch": 1.0144927536231885, "percentage": 33.82, "elapsed_time": "1:30:19", "remaining_time": "2:56:47"}
32
+ {"current_steps": 145, "total_steps": 414, "loss": 0.171, "accuracy": 0.9375, "learning_rate": 7.488767608052628e-07, "epoch": 1.0507246376811594, "percentage": 35.02, "elapsed_time": "1:33:02", "remaining_time": "2:52:37"}
33
+ {"current_steps": 150, "total_steps": 414, "loss": 0.1436, "accuracy": 0.9375, "learning_rate": 7.318317659926636e-07, "epoch": 1.0869565217391304, "percentage": 36.23, "elapsed_time": "1:35:46", "remaining_time": "2:48:33"}
34
+ {"current_steps": 155, "total_steps": 414, "loss": 0.1415, "accuracy": 0.9624999761581421, "learning_rate": 7.144363460405189e-07, "epoch": 1.1231884057971016, "percentage": 37.44, "elapsed_time": "1:38:29", "remaining_time": "2:44:34"}
35
+ {"current_steps": 160, "total_steps": 414, "loss": 0.1459, "accuracy": 0.949999988079071, "learning_rate": 6.967167949833762e-07, "epoch": 1.1594202898550725, "percentage": 38.65, "elapsed_time": "1:41:13", "remaining_time": "2:40:42"}
36
+ {"current_steps": 160, "total_steps": 414, "eval_loss": 0.42876046895980835, "epoch": 1.1594202898550725, "percentage": 38.65, "elapsed_time": "1:45:21", "remaining_time": "2:47:14"}
37
+ {"current_steps": 165, "total_steps": 414, "loss": 0.1497, "accuracy": 0.9375, "learning_rate": 6.786998967959219e-07, "epoch": 1.1956521739130435, "percentage": 39.86, "elapsed_time": "1:48:36", "remaining_time": "2:43:54"}
38
+ {"current_steps": 170, "total_steps": 414, "loss": 0.1533, "accuracy": 0.9437500238418579, "learning_rate": 6.604128849076838e-07, "epoch": 1.2318840579710144, "percentage": 41.06, "elapsed_time": "1:51:20", "remaining_time": "2:39:47"}
39
+ {"current_steps": 175, "total_steps": 414, "loss": 0.1459, "accuracy": 0.9437500238418579, "learning_rate": 6.418834010383609e-07, "epoch": 1.2681159420289856, "percentage": 42.27, "elapsed_time": "1:54:03", "remaining_time": "2:35:46"}
40
+ {"current_steps": 180, "total_steps": 414, "loss": 0.1408, "accuracy": 0.96875, "learning_rate": 6.231394534160007e-07, "epoch": 1.3043478260869565, "percentage": 43.48, "elapsed_time": "1:56:46", "remaining_time": "2:31:48"}
41
+ {"current_steps": 185, "total_steps": 414, "loss": 0.1426, "accuracy": 0.9437500238418579, "learning_rate": 6.042093744411828e-07, "epoch": 1.3405797101449275, "percentage": 44.69, "elapsed_time": "1:59:29", "remaining_time": "2:27:54"}
42
+ {"current_steps": 190, "total_steps": 414, "loss": 0.1351, "accuracy": 0.925000011920929, "learning_rate": 5.851217778611993e-07, "epoch": 1.3768115942028984, "percentage": 45.89, "elapsed_time": "2:02:13", "remaining_time": "2:24:05"}
43
+ {"current_steps": 195, "total_steps": 414, "loss": 0.1542, "accuracy": 0.9437500238418579, "learning_rate": 5.659055155189651e-07, "epoch": 1.4130434782608696, "percentage": 47.1, "elapsed_time": "2:04:55", "remaining_time": "2:20:18"}
44
+ {"current_steps": 200, "total_steps": 414, "loss": 0.1455, "accuracy": 0.9437500238418579, "learning_rate": 5.465896337420358e-07, "epoch": 1.4492753623188406, "percentage": 48.31, "elapsed_time": "2:07:39", "remaining_time": "2:16:35"}
45
+ {"current_steps": 200, "total_steps": 414, "eval_loss": 0.42552247643470764, "epoch": 1.4492753623188406, "percentage": 48.31, "elapsed_time": "2:11:46", "remaining_time": "2:20:59"}
46
+ {"current_steps": 205, "total_steps": 414, "loss": 0.1402, "accuracy": 0.9375, "learning_rate": 5.272033294376521e-07, "epoch": 1.4855072463768115, "percentage": 49.52, "elapsed_time": "2:15:00", "remaining_time": "2:17:38"}
47
+ {"current_steps": 210, "total_steps": 414, "loss": 0.1564, "accuracy": 0.949999988079071, "learning_rate": 5.077759059601755e-07, "epoch": 1.5217391304347827, "percentage": 50.72, "elapsed_time": "2:17:43", "remaining_time": "2:13:47"}
48
+ {"current_steps": 215, "total_steps": 414, "loss": 0.1462, "accuracy": 0.949999988079071, "learning_rate": 4.883367288176238e-07, "epoch": 1.5579710144927537, "percentage": 51.93, "elapsed_time": "2:20:26", "remaining_time": "2:09:59"}
49
+ {"current_steps": 220, "total_steps": 414, "loss": 0.149, "accuracy": 0.949999988079071, "learning_rate": 4.6891518128425974e-07, "epoch": 1.5942028985507246, "percentage": 53.14, "elapsed_time": "2:23:09", "remaining_time": "2:06:14"}
50
+ {"current_steps": 225, "total_steps": 414, "loss": 0.1258, "accuracy": 0.956250011920929, "learning_rate": 4.495406199863217e-07, "epoch": 1.6304347826086958, "percentage": 54.35, "elapsed_time": "2:25:52", "remaining_time": "2:02:32"}
51
+ {"current_steps": 230, "total_steps": 414, "loss": 0.1301, "accuracy": 0.9375, "learning_rate": 4.302423305280385e-07, "epoch": 1.6666666666666665, "percentage": 55.56, "elapsed_time": "2:28:35", "remaining_time": "1:58:52"}
52
+ {"current_steps": 235, "total_steps": 414, "loss": 0.1278, "accuracy": 0.949999988079071, "learning_rate": 4.1104948322499386e-07, "epoch": 1.7028985507246377, "percentage": 56.76, "elapsed_time": "2:31:18", "remaining_time": "1:55:15"}
53
+ {"current_steps": 240, "total_steps": 414, "loss": 0.1358, "accuracy": 0.925000011920929, "learning_rate": 3.919910890117584e-07, "epoch": 1.7391304347826086, "percentage": 57.97, "elapsed_time": "2:34:02", "remaining_time": "1:51:40"}
54
+ {"current_steps": 240, "total_steps": 414, "eval_loss": 0.4247341454029083, "epoch": 1.7391304347826086, "percentage": 57.97, "elapsed_time": "2:38:09", "remaining_time": "1:54:40"}
55
+ {"current_steps": 245, "total_steps": 414, "loss": 0.1311, "accuracy": 0.9375, "learning_rate": 3.7309595559042973e-07, "epoch": 1.7753623188405796, "percentage": 59.18, "elapsed_time": "2:41:27", "remaining_time": "1:51:22"}
56
+ {"current_steps": 250, "total_steps": 414, "loss": 0.1252, "accuracy": 0.9437500238418579, "learning_rate": 3.54392643886374e-07, "epoch": 1.8115942028985508, "percentage": 60.39, "elapsed_time": "2:44:11", "remaining_time": "1:47:42"}
57
+ {"current_steps": 255, "total_steps": 414, "loss": 0.1613, "accuracy": 0.918749988079071, "learning_rate": 3.3590942487697765e-07, "epoch": 1.8478260869565217, "percentage": 61.59, "elapsed_time": "2:46:54", "remaining_time": "1:44:04"}
58
+ {"current_steps": 260, "total_steps": 414, "loss": 0.1348, "accuracy": 0.925000011920929, "learning_rate": 3.176742368586725e-07, "epoch": 1.8840579710144927, "percentage": 62.8, "elapsed_time": "2:49:37", "remaining_time": "1:40:28"}
59
+ {"current_steps": 265, "total_steps": 414, "loss": 0.1427, "accuracy": 0.925000011920929, "learning_rate": 2.997146432168236e-07, "epoch": 1.9202898550724639, "percentage": 64.01, "elapsed_time": "2:52:21", "remaining_time": "1:36:54"}
60
+ {"current_steps": 270, "total_steps": 414, "loss": 0.1257, "accuracy": 0.949999988079071, "learning_rate": 2.8205779076231446e-07, "epoch": 1.9565217391304348, "percentage": 65.22, "elapsed_time": "2:55:03", "remaining_time": "1:33:22"}
61
+ {"current_steps": 275, "total_steps": 414, "loss": 0.1492, "accuracy": 0.925000011920929, "learning_rate": 2.647303686978035e-07, "epoch": 1.9927536231884058, "percentage": 66.43, "elapsed_time": "2:57:46", "remaining_time": "1:29:51"}
62
+ {"current_steps": 280, "total_steps": 414, "loss": 0.0938, "accuracy": 0.949999988079071, "learning_rate": 2.4775856827568014e-07, "epoch": 2.028985507246377, "percentage": 67.63, "elapsed_time": "3:00:30", "remaining_time": "1:26:23"}
63
+ {"current_steps": 280, "total_steps": 414, "eval_loss": 0.41276049613952637, "epoch": 2.028985507246377, "percentage": 67.63, "elapsed_time": "3:04:37", "remaining_time": "1:28:21"}
64
+ {"current_steps": 285, "total_steps": 414, "loss": 0.072, "accuracy": 0.981249988079071, "learning_rate": 2.3116804320869464e-07, "epoch": 2.0652173913043477, "percentage": 68.84, "elapsed_time": "3:07:53", "remaining_time": "1:25:02"}
65
+ {"current_steps": 290, "total_steps": 414, "loss": 0.0702, "accuracy": 0.9624999761581421, "learning_rate": 2.1498387089310865e-07, "epoch": 2.101449275362319, "percentage": 70.05, "elapsed_time": "3:10:36", "remaining_time": "1:21:30"}
66
+ {"current_steps": 295, "total_steps": 414, "loss": 0.0725, "accuracy": 0.96875, "learning_rate": 1.9923051450297336e-07, "epoch": 2.13768115942029, "percentage": 71.26, "elapsed_time": "3:13:20", "remaining_time": "1:17:59"}
67
+ {"current_steps": 300, "total_steps": 414, "loss": 0.0649, "accuracy": 0.9937499761581421, "learning_rate": 1.839317860128368e-07, "epoch": 2.1739130434782608, "percentage": 72.46, "elapsed_time": "3:16:03", "remaining_time": "1:14:30"}
68
+ {"current_steps": 305, "total_steps": 414, "loss": 0.0638, "accuracy": 0.949999988079071, "learning_rate": 1.6911081020477176e-07, "epoch": 2.210144927536232, "percentage": 73.67, "elapsed_time": "3:18:46", "remaining_time": "1:11:02"}
69
+ {"current_steps": 310, "total_steps": 414, "loss": 0.0644, "accuracy": 0.981249988079071, "learning_rate": 1.5478998971412666e-07, "epoch": 2.246376811594203, "percentage": 74.88, "elapsed_time": "3:21:30", "remaining_time": "1:07:36"}
70
+ {"current_steps": 315, "total_steps": 414, "loss": 0.0609, "accuracy": 0.987500011920929, "learning_rate": 1.4099097116683873e-07, "epoch": 2.282608695652174, "percentage": 76.09, "elapsed_time": "3:24:14", "remaining_time": "1:04:11"}
71
+ {"current_steps": 320, "total_steps": 414, "loss": 0.0592, "accuracy": 0.9937499761581421, "learning_rate": 1.2773461245949247e-07, "epoch": 2.318840579710145, "percentage": 77.29, "elapsed_time": "3:26:57", "remaining_time": "1:00:47"}
72
+ {"current_steps": 320, "total_steps": 414, "eval_loss": 0.4438334107398987, "epoch": 2.318840579710145, "percentage": 77.29, "elapsed_time": "3:31:04", "remaining_time": "1:02:00"}
73
+ {"current_steps": 325, "total_steps": 414, "loss": 0.0642, "accuracy": 0.981249988079071, "learning_rate": 1.1504095123158014e-07, "epoch": 2.355072463768116, "percentage": 78.5, "elapsed_time": "3:34:21", "remaining_time": "0:58:41"}
74
+ {"current_steps": 330, "total_steps": 414, "loss": 0.0575, "accuracy": 0.981249988079071, "learning_rate": 1.0292917457762323e-07, "epoch": 2.391304347826087, "percentage": 79.71, "elapsed_time": "3:37:04", "remaining_time": "0:55:15"}
75
+ {"current_steps": 335, "total_steps": 414, "loss": 0.0539, "accuracy": 0.9937499761581421, "learning_rate": 9.141759004493282e-08, "epoch": 2.427536231884058, "percentage": 80.92, "elapsed_time": "3:39:47", "remaining_time": "0:51:49"}
76
+ {"current_steps": 340, "total_steps": 414, "loss": 0.0649, "accuracy": 0.987500011920929, "learning_rate": 8.052359796084951e-08, "epoch": 2.463768115942029, "percentage": 82.13, "elapsed_time": "3:42:31", "remaining_time": "0:48:25"}
77
+ {"current_steps": 345, "total_steps": 414, "loss": 0.0628, "accuracy": 0.987500011920929, "learning_rate": 7.026366513129139e-08, "epoch": 2.5, "percentage": 83.33, "elapsed_time": "3:45:14", "remaining_time": "0:45:02"}
78
+ {"current_steps": 350, "total_steps": 414, "loss": 0.0577, "accuracy": 0.987500011920929, "learning_rate": 6.065329995036572e-08, "epoch": 2.536231884057971, "percentage": 84.54, "elapsed_time": "3:47:57", "remaining_time": "0:41:40"}
79
+ {"current_steps": 355, "total_steps": 414, "loss": 0.0527, "accuracy": 0.9937499761581421, "learning_rate": 5.170702895866591e-08, "epoch": 2.572463768115942, "percentage": 85.75, "elapsed_time": "3:50:40", "remaining_time": "0:38:20"}
80
+ {"current_steps": 360, "total_steps": 414, "loss": 0.0673, "accuracy": 0.9750000238418579, "learning_rate": 4.343837488569057e-08, "epoch": 2.608695652173913, "percentage": 86.96, "elapsed_time": "3:53:23", "remaining_time": "0:35:00"}
81
+ {"current_steps": 360, "total_steps": 414, "eval_loss": 0.455331414937973, "epoch": 2.608695652173913, "percentage": 86.96, "elapsed_time": "3:57:31", "remaining_time": "0:35:37"}
82
+ {"current_steps": 365, "total_steps": 414, "loss": 0.0632, "accuracy": 0.9937499761581421, "learning_rate": 3.585983620957112e-08, "epoch": 2.644927536231884, "percentage": 88.16, "elapsed_time": "4:00:47", "remaining_time": "0:32:19"}
83
+ {"current_steps": 370, "total_steps": 414, "loss": 0.0587, "accuracy": 0.987500011920929, "learning_rate": 2.8982868265005454e-08, "epoch": 2.681159420289855, "percentage": 89.37, "elapsed_time": "4:03:30", "remaining_time": "0:28:57"}
84
+ {"current_steps": 375, "total_steps": 414, "loss": 0.0602, "accuracy": 0.981249988079071, "learning_rate": 2.2817865927956092e-08, "epoch": 2.717391304347826, "percentage": 90.58, "elapsed_time": "4:06:14", "remaining_time": "0:25:36"}
85
+ {"current_steps": 380, "total_steps": 414, "loss": 0.0483, "accuracy": 0.987500011920929, "learning_rate": 1.7374147903282176e-08, "epoch": 2.753623188405797, "percentage": 91.79, "elapsed_time": "4:08:58", "remaining_time": "0:22:16"}
86
+ {"current_steps": 385, "total_steps": 414, "loss": 0.0557, "accuracy": 0.987500011920929, "learning_rate": 1.2659942639057952e-08, "epoch": 2.789855072463768, "percentage": 93.0, "elapsed_time": "4:11:41", "remaining_time": "0:18:57"}
87
+ {"current_steps": 390, "total_steps": 414, "loss": 0.0505, "accuracy": 0.96875, "learning_rate": 8.682375888868166e-09, "epoch": 2.8260869565217392, "percentage": 94.2, "elapsed_time": "4:14:24", "remaining_time": "0:15:39"}
88
+ {"current_steps": 395, "total_steps": 414, "loss": 0.0541, "accuracy": 0.96875, "learning_rate": 5.447459940880084e-09, "epoch": 2.86231884057971, "percentage": 95.41, "elapsed_time": "4:17:08", "remaining_time": "0:12:22"}
89
+ {"current_steps": 400, "total_steps": 414, "loss": 0.0728, "accuracy": 0.9624999761581421, "learning_rate": 2.9600845299737053e-09, "epoch": 2.898550724637681, "percentage": 96.62, "elapsed_time": "4:19:51", "remaining_time": "0:09:05"}
90
+ {"current_steps": 400, "total_steps": 414, "eval_loss": 0.45203983783721924, "epoch": 2.898550724637681, "percentage": 96.62, "elapsed_time": "4:23:58", "remaining_time": "0:09:14"}
91
+ {"current_steps": 405, "total_steps": 414, "loss": 0.0602, "accuracy": 0.981249988079071, "learning_rate": 1.2240094466668404e-09, "epoch": 2.9347826086956523, "percentage": 97.83, "elapsed_time": "4:27:14", "remaining_time": "0:05:56"}
92
+ {"current_steps": 410, "total_steps": 414, "loss": 0.058, "accuracy": 0.9937499761581421, "learning_rate": 2.418588540059607e-10, "epoch": 2.971014492753623, "percentage": 99.03, "elapsed_time": "4:29:57", "remaining_time": "0:02:38"}
93
+ {"current_steps": 414, "total_steps": 414, "epoch": 3.0, "percentage": 100.0, "elapsed_time": "4:32:41", "remaining_time": "0:00:00"}
trainer_state.json ADDED
@@ -0,0 +1,1432 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 3.0,
5
+ "eval_steps": 40,
6
+ "global_step": 414,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.036231884057971016,
13
+ "grad_norm": 61.66789827681496,
14
+ "learning_rate": 5e-07,
15
+ "logits/chosen": -2.7321553230285645,
16
+ "logits/rejected": -2.7100937366485596,
17
+ "logps/chosen": -182.5845489501953,
18
+ "logps/rejected": -189.55001831054688,
19
+ "loss": 0.689,
20
+ "rewards/accuracies": 0.3187499940395355,
21
+ "rewards/chosen": -0.0021577859297394753,
22
+ "rewards/margins": 0.005646524019539356,
23
+ "rewards/rejected": -0.007804309483617544,
24
+ "step": 5
25
+ },
26
+ {
27
+ "epoch": 0.07246376811594203,
28
+ "grad_norm": 44.96703657621111,
29
+ "learning_rate": 1e-06,
30
+ "logits/chosen": -2.753889799118042,
31
+ "logits/rejected": -2.7519516944885254,
32
+ "logps/chosen": -197.34320068359375,
33
+ "logps/rejected": -184.00961303710938,
34
+ "loss": 0.6275,
35
+ "rewards/accuracies": 0.675000011920929,
36
+ "rewards/chosen": 0.030874451622366905,
37
+ "rewards/margins": 0.18904080986976624,
38
+ "rewards/rejected": -0.15816636383533478,
39
+ "step": 10
40
+ },
41
+ {
42
+ "epoch": 0.10869565217391304,
43
+ "grad_norm": 51.54402636298773,
44
+ "learning_rate": 9.996221126793764e-07,
45
+ "logits/chosen": -2.694437265396118,
46
+ "logits/rejected": -2.691904067993164,
47
+ "logps/chosen": -203.14883422851562,
48
+ "logps/rejected": -204.52386474609375,
49
+ "loss": 0.5851,
50
+ "rewards/accuracies": 0.75,
51
+ "rewards/chosen": 0.6205412745475769,
52
+ "rewards/margins": 0.9350436925888062,
53
+ "rewards/rejected": -0.314502477645874,
54
+ "step": 15
55
+ },
56
+ {
57
+ "epoch": 0.14492753623188406,
58
+ "grad_norm": 35.11282014809116,
59
+ "learning_rate": 9.984890219128145e-07,
60
+ "logits/chosen": -2.609405517578125,
61
+ "logits/rejected": -2.5795176029205322,
62
+ "logps/chosen": -188.33395385742188,
63
+ "logps/rejected": -192.52633666992188,
64
+ "loss": 0.5144,
65
+ "rewards/accuracies": 0.699999988079071,
66
+ "rewards/chosen": 0.8838651776313782,
67
+ "rewards/margins": 1.275496244430542,
68
+ "rewards/rejected": -0.39163118600845337,
69
+ "step": 20
70
+ },
71
+ {
72
+ "epoch": 0.18115942028985507,
73
+ "grad_norm": 37.05591291167376,
74
+ "learning_rate": 9.966024404228493e-07,
75
+ "logits/chosen": -2.4429798126220703,
76
+ "logits/rejected": -2.4225034713745117,
77
+ "logps/chosen": -179.79977416992188,
78
+ "logps/rejected": -179.25279235839844,
79
+ "loss": 0.5036,
80
+ "rewards/accuracies": 0.7562500238418579,
81
+ "rewards/chosen": 0.40532931685447693,
82
+ "rewards/margins": 0.8947975039482117,
83
+ "rewards/rejected": -0.4894680976867676,
84
+ "step": 25
85
+ },
86
+ {
87
+ "epoch": 0.21739130434782608,
88
+ "grad_norm": 33.27618771325657,
89
+ "learning_rate": 9.939652198703783e-07,
90
+ "logits/chosen": -2.319044589996338,
91
+ "logits/rejected": -2.320253372192383,
92
+ "logps/chosen": -188.62039184570312,
93
+ "logps/rejected": -193.8806915283203,
94
+ "loss": 0.4987,
95
+ "rewards/accuracies": 0.762499988079071,
96
+ "rewards/chosen": 0.6481167078018188,
97
+ "rewards/margins": 1.2183058261871338,
98
+ "rewards/rejected": -0.5701891183853149,
99
+ "step": 30
100
+ },
101
+ {
102
+ "epoch": 0.2536231884057971,
103
+ "grad_norm": 38.645073192304736,
104
+ "learning_rate": 9.905813465442354e-07,
105
+ "logits/chosen": -2.242053747177124,
106
+ "logits/rejected": -2.2155580520629883,
107
+ "logps/chosen": -203.94546508789062,
108
+ "logps/rejected": -194.8705291748047,
109
+ "loss": 0.5091,
110
+ "rewards/accuracies": 0.7250000238418579,
111
+ "rewards/chosen": 0.8871867060661316,
112
+ "rewards/margins": 1.2737131118774414,
113
+ "rewards/rejected": -0.3865264356136322,
114
+ "step": 35
115
+ },
116
+ {
117
+ "epoch": 0.2898550724637681,
118
+ "grad_norm": 30.937642063399068,
119
+ "learning_rate": 9.864559353357187e-07,
120
+ "logits/chosen": -2.0998620986938477,
121
+ "logits/rejected": -2.094836473464966,
122
+ "logps/chosen": -182.6898956298828,
123
+ "logps/rejected": -185.73983764648438,
124
+ "loss": 0.4889,
125
+ "rewards/accuracies": 0.7749999761581421,
126
+ "rewards/chosen": 1.0410809516906738,
127
+ "rewards/margins": 1.0222995281219482,
128
+ "rewards/rejected": 0.018781563267111778,
129
+ "step": 40
130
+ },
131
+ {
132
+ "epoch": 0.2898550724637681,
133
+ "eval_logits/chosen": -2.035081624984741,
134
+ "eval_logits/rejected": -1.9875677824020386,
135
+ "eval_logps/chosen": -188.77523803710938,
136
+ "eval_logps/rejected": -200.09498596191406,
137
+ "eval_loss": 0.4642000198364258,
138
+ "eval_rewards/accuracies": 0.7943548560142517,
139
+ "eval_rewards/chosen": 1.1544201374053955,
140
+ "eval_rewards/margins": 1.3431099653244019,
141
+ "eval_rewards/rejected": -0.18868987262248993,
142
+ "eval_runtime": 248.6586,
143
+ "eval_samples_per_second": 15.773,
144
+ "eval_steps_per_second": 0.249,
145
+ "step": 40
146
+ },
147
+ {
148
+ "epoch": 0.32608695652173914,
149
+ "grad_norm": 29.205255063362404,
150
+ "learning_rate": 9.815952220071804e-07,
151
+ "logits/chosen": -1.9733244180679321,
152
+ "logits/rejected": -1.9242098331451416,
153
+ "logps/chosen": -195.65652465820312,
154
+ "logps/rejected": -221.744384765625,
155
+ "loss": 0.4587,
156
+ "rewards/accuracies": 0.762499988079071,
157
+ "rewards/chosen": 1.3803393840789795,
158
+ "rewards/margins": 1.8610279560089111,
159
+ "rewards/rejected": -0.4806883931159973,
160
+ "step": 45
161
+ },
162
+ {
163
+ "epoch": 0.36231884057971014,
164
+ "grad_norm": 29.678687938529745,
165
+ "learning_rate": 9.76006553766365e-07,
166
+ "logits/chosen": -1.8085625171661377,
167
+ "logits/rejected": -1.772071123123169,
168
+ "logps/chosen": -198.9405517578125,
169
+ "logps/rejected": -203.29978942871094,
170
+ "loss": 0.4531,
171
+ "rewards/accuracies": 0.768750011920929,
172
+ "rewards/chosen": 0.8211376070976257,
173
+ "rewards/margins": 1.646414041519165,
174
+ "rewards/rejected": -0.8252763748168945,
175
+ "step": 50
176
+ },
177
+ {
178
+ "epoch": 0.39855072463768115,
179
+ "grad_norm": 30.73732601293307,
180
+ "learning_rate": 9.696983781607415e-07,
181
+ "logits/chosen": -1.7908868789672852,
182
+ "logits/rejected": -1.7599233388900757,
183
+ "logps/chosen": -183.01809692382812,
184
+ "logps/rejected": -172.02801513671875,
185
+ "loss": 0.4595,
186
+ "rewards/accuracies": 0.8374999761581421,
187
+ "rewards/chosen": 0.607209324836731,
188
+ "rewards/margins": 1.649515151977539,
189
+ "rewards/rejected": -1.042305827140808,
190
+ "step": 55
191
+ },
192
+ {
193
+ "epoch": 0.43478260869565216,
194
+ "grad_norm": 41.49881496846909,
195
+ "learning_rate": 9.626802303086209e-07,
196
+ "logits/chosen": -1.9050066471099854,
197
+ "logits/rejected": -1.8625679016113281,
198
+ "logps/chosen": -186.8179931640625,
199
+ "logps/rejected": -194.26707458496094,
200
+ "loss": 0.4276,
201
+ "rewards/accuracies": 0.8187500238418579,
202
+ "rewards/chosen": 0.4140642285346985,
203
+ "rewards/margins": 1.712418794631958,
204
+ "rewards/rejected": -1.2983543872833252,
205
+ "step": 60
206
+ },
207
+ {
208
+ "epoch": 0.47101449275362317,
209
+ "grad_norm": 36.480881476794956,
210
+ "learning_rate": 9.549627184863528e-07,
211
+ "logits/chosen": -2.1638290882110596,
212
+ "logits/rejected": -2.0793471336364746,
213
+ "logps/chosen": -191.9010467529297,
214
+ "logps/rejected": -193.20651245117188,
215
+ "loss": 0.4293,
216
+ "rewards/accuracies": 0.7875000238418579,
217
+ "rewards/chosen": 0.045776158571243286,
218
+ "rewards/margins": 1.6033704280853271,
219
+ "rewards/rejected": -1.5575940608978271,
220
+ "step": 65
221
+ },
222
+ {
223
+ "epoch": 0.5072463768115942,
224
+ "grad_norm": 26.318712537847357,
225
+ "learning_rate": 9.465575080933957e-07,
226
+ "logits/chosen": -2.1214632987976074,
227
+ "logits/rejected": -2.081162214279175,
228
+ "logps/chosen": -172.66419982910156,
229
+ "logps/rejected": -208.19149780273438,
230
+ "loss": 0.3963,
231
+ "rewards/accuracies": 0.831250011920929,
232
+ "rewards/chosen": 0.17696644365787506,
233
+ "rewards/margins": 1.8183233737945557,
234
+ "rewards/rejected": -1.6413570642471313,
235
+ "step": 70
236
+ },
237
+ {
238
+ "epoch": 0.5434782608695652,
239
+ "grad_norm": 32.35315227999789,
240
+ "learning_rate": 9.374773040194878e-07,
241
+ "logits/chosen": -2.1271562576293945,
242
+ "logits/rejected": -2.0680038928985596,
243
+ "logps/chosen": -205.5594482421875,
244
+ "logps/rejected": -210.7834014892578,
245
+ "loss": 0.4353,
246
+ "rewards/accuracies": 0.824999988079071,
247
+ "rewards/chosen": 0.16769471764564514,
248
+ "rewards/margins": 1.786273717880249,
249
+ "rewards/rejected": -1.6185792684555054,
250
+ "step": 75
251
+ },
252
+ {
253
+ "epoch": 0.5797101449275363,
254
+ "grad_norm": 29.96593011887677,
255
+ "learning_rate": 9.277358314405818e-07,
256
+ "logits/chosen": -2.046504497528076,
257
+ "logits/rejected": -2.0122628211975098,
258
+ "logps/chosen": -189.37771606445312,
259
+ "logps/rejected": -206.10659790039062,
260
+ "loss": 0.3941,
261
+ "rewards/accuracies": 0.8125,
262
+ "rewards/chosen": -0.2374907284975052,
263
+ "rewards/margins": 1.910300612449646,
264
+ "rewards/rejected": -2.1477913856506348,
265
+ "step": 80
266
+ },
267
+ {
268
+ "epoch": 0.5797101449275363,
269
+ "eval_logits/chosen": -1.9900643825531006,
270
+ "eval_logits/rejected": -1.9449083805084229,
271
+ "eval_logps/chosen": -202.59437561035156,
272
+ "eval_logps/rejected": -221.12725830078125,
273
+ "eval_loss": 0.4217630624771118,
274
+ "eval_rewards/accuracies": 0.8044354915618896,
275
+ "eval_rewards/chosen": -0.22749020159244537,
276
+ "eval_rewards/margins": 2.0644266605377197,
277
+ "eval_rewards/rejected": -2.291916847229004,
278
+ "eval_runtime": 247.3121,
279
+ "eval_samples_per_second": 15.859,
280
+ "eval_steps_per_second": 0.251,
281
+ "step": 80
282
+ },
283
+ {
284
+ "epoch": 0.6159420289855072,
285
+ "grad_norm": 34.207898226903815,
286
+ "learning_rate": 9.173478150725651e-07,
287
+ "logits/chosen": -2.0169568061828613,
288
+ "logits/rejected": -1.9391515254974365,
289
+ "logps/chosen": -209.73440551757812,
290
+ "logps/rejected": -215.14205932617188,
291
+ "loss": 0.422,
292
+ "rewards/accuracies": 0.831250011920929,
293
+ "rewards/chosen": 0.1130056157708168,
294
+ "rewards/margins": 2.3998470306396484,
295
+ "rewards/rejected": -2.2868411540985107,
296
+ "step": 85
297
+ },
298
+ {
299
+ "epoch": 0.6521739130434783,
300
+ "grad_norm": 28.850805260840193,
301
+ "learning_rate": 9.063289569141251e-07,
302
+ "logits/chosen": -2.1180360317230225,
303
+ "logits/rejected": -2.080235242843628,
304
+ "logps/chosen": -214.2292022705078,
305
+ "logps/rejected": -223.37564086914062,
306
+ "loss": 0.4135,
307
+ "rewards/accuracies": 0.84375,
308
+ "rewards/chosen": 0.5250645279884338,
309
+ "rewards/margins": 2.2779579162597656,
310
+ "rewards/rejected": -1.7528936862945557,
311
+ "step": 90
312
+ },
313
+ {
314
+ "epoch": 0.6884057971014492,
315
+ "grad_norm": 34.73111091101309,
316
+ "learning_rate": 8.946959125124051e-07,
317
+ "logits/chosen": -2.2047152519226074,
318
+ "logits/rejected": -2.1581788063049316,
319
+ "logps/chosen": -209.3234405517578,
320
+ "logps/rejected": -195.6946563720703,
321
+ "loss": 0.4077,
322
+ "rewards/accuracies": 0.824999988079071,
323
+ "rewards/chosen": 0.25443512201309204,
324
+ "rewards/margins": 2.2029635906219482,
325
+ "rewards/rejected": -1.948528528213501,
326
+ "step": 95
327
+ },
328
+ {
329
+ "epoch": 0.7246376811594203,
330
+ "grad_norm": 32.202226649445414,
331
+ "learning_rate": 8.824662657873238e-07,
332
+ "logits/chosen": -2.256134510040283,
333
+ "logits/rejected": -2.2395310401916504,
334
+ "logps/chosen": -176.19464111328125,
335
+ "logps/rejected": -209.0999298095703,
336
+ "loss": 0.3767,
337
+ "rewards/accuracies": 0.800000011920929,
338
+ "rewards/chosen": -0.3457742929458618,
339
+ "rewards/margins": 1.9383150339126587,
340
+ "rewards/rejected": -2.2840893268585205,
341
+ "step": 100
342
+ },
343
+ {
344
+ "epoch": 0.7608695652173914,
345
+ "grad_norm": 31.35693490170537,
346
+ "learning_rate": 8.696585024526135e-07,
347
+ "logits/chosen": -2.3358142375946045,
348
+ "logits/rejected": -2.307068347930908,
349
+ "logps/chosen": -191.8298797607422,
350
+ "logps/rejected": -227.55612182617188,
351
+ "loss": 0.4017,
352
+ "rewards/accuracies": 0.8125,
353
+ "rewards/chosen": -0.2825666069984436,
354
+ "rewards/margins": 2.5402400493621826,
355
+ "rewards/rejected": -2.8228065967559814,
356
+ "step": 105
357
+ },
358
+ {
359
+ "epoch": 0.7971014492753623,
360
+ "grad_norm": 36.29919961993296,
361
+ "learning_rate": 8.562919820737535e-07,
362
+ "logits/chosen": -2.2931602001190186,
363
+ "logits/rejected": -2.2568557262420654,
364
+ "logps/chosen": -207.34158325195312,
365
+ "logps/rejected": -208.9143524169922,
366
+ "loss": 0.3767,
367
+ "rewards/accuracies": 0.84375,
368
+ "rewards/chosen": -0.3017815351486206,
369
+ "rewards/margins": 2.3830084800720215,
370
+ "rewards/rejected": -2.6847901344299316,
371
+ "step": 110
372
+ },
373
+ {
374
+ "epoch": 0.8333333333333334,
375
+ "grad_norm": 28.819478551304137,
376
+ "learning_rate": 8.423869088050315e-07,
377
+ "logits/chosen": -2.2734763622283936,
378
+ "logits/rejected": -2.2522785663604736,
379
+ "logps/chosen": -195.6924591064453,
380
+ "logps/rejected": -221.85104370117188,
381
+ "loss": 0.3892,
382
+ "rewards/accuracies": 0.8125,
383
+ "rewards/chosen": -0.12773282825946808,
384
+ "rewards/margins": 2.3888843059539795,
385
+ "rewards/rejected": -2.5166170597076416,
386
+ "step": 115
387
+ },
388
+ {
389
+ "epoch": 0.8695652173913043,
390
+ "grad_norm": 37.085951928654964,
391
+ "learning_rate": 8.2796430084997e-07,
392
+ "logits/chosen": -2.2070839405059814,
393
+ "logits/rejected": -2.1736464500427246,
394
+ "logps/chosen": -196.70889282226562,
395
+ "logps/rejected": -207.93417358398438,
396
+ "loss": 0.3717,
397
+ "rewards/accuracies": 0.862500011920929,
398
+ "rewards/chosen": 0.12862932682037354,
399
+ "rewards/margins": 2.698848009109497,
400
+ "rewards/rejected": -2.570218563079834,
401
+ "step": 120
402
+ },
403
+ {
404
+ "epoch": 0.8695652173913043,
405
+ "eval_logits/chosen": -2.122933864593506,
406
+ "eval_logits/rejected": -2.0901710987091064,
407
+ "eval_logps/chosen": -202.4208221435547,
408
+ "eval_logps/rejected": -223.09356689453125,
409
+ "eval_loss": 0.438678115606308,
410
+ "eval_rewards/accuracies": 0.8286290168762207,
411
+ "eval_rewards/chosen": -0.2101391851902008,
412
+ "eval_rewards/margins": 2.278407573699951,
413
+ "eval_rewards/rejected": -2.488546848297119,
414
+ "eval_runtime": 247.3754,
415
+ "eval_samples_per_second": 15.854,
416
+ "eval_steps_per_second": 0.251,
417
+ "step": 120
418
+ },
419
+ {
420
+ "epoch": 0.9057971014492754,
421
+ "grad_norm": 27.843666767086823,
422
+ "learning_rate": 8.130459586912753e-07,
423
+ "logits/chosen": -2.0930094718933105,
424
+ "logits/rejected": -2.0791330337524414,
425
+ "logps/chosen": -220.4320526123047,
426
+ "logps/rejected": -218.3594512939453,
427
+ "loss": 0.4575,
428
+ "rewards/accuracies": 0.800000011920929,
429
+ "rewards/chosen": -0.9592973589897156,
430
+ "rewards/margins": 1.66092050075531,
431
+ "rewards/rejected": -2.620218276977539,
432
+ "step": 125
433
+ },
434
+ {
435
+ "epoch": 0.9420289855072463,
436
+ "grad_norm": 28.902451958691426,
437
+ "learning_rate": 7.97654432138333e-07,
438
+ "logits/chosen": -2.1393418312072754,
439
+ "logits/rejected": -2.103482723236084,
440
+ "logps/chosen": -214.5059051513672,
441
+ "logps/rejected": -248.8673858642578,
442
+ "loss": 0.388,
443
+ "rewards/accuracies": 0.856249988079071,
444
+ "rewards/chosen": -0.1911260336637497,
445
+ "rewards/margins": 2.8205361366271973,
446
+ "rewards/rejected": -3.011662006378174,
447
+ "step": 130
448
+ },
449
+ {
450
+ "epoch": 0.9782608695652174,
451
+ "grad_norm": 26.45174989804514,
452
+ "learning_rate": 7.81812986242061e-07,
453
+ "logits/chosen": -2.2018837928771973,
454
+ "logits/rejected": -2.1459240913391113,
455
+ "logps/chosen": -193.5723419189453,
456
+ "logps/rejected": -233.81838989257812,
457
+ "loss": 0.365,
458
+ "rewards/accuracies": 0.831250011920929,
459
+ "rewards/chosen": 0.10008885711431503,
460
+ "rewards/margins": 3.072911262512207,
461
+ "rewards/rejected": -2.972822427749634,
462
+ "step": 135
463
+ },
464
+ {
465
+ "epoch": 1.0144927536231885,
466
+ "grad_norm": 16.755101670108846,
467
+ "learning_rate": 7.655455661286375e-07,
468
+ "logits/chosen": -2.190566301345825,
469
+ "logits/rejected": -2.1852922439575195,
470
+ "logps/chosen": -190.56027221679688,
471
+ "logps/rejected": -234.6202392578125,
472
+ "loss": 0.2524,
473
+ "rewards/accuracies": 0.918749988079071,
474
+ "rewards/chosen": 0.05487387627363205,
475
+ "rewards/margins": 3.58606219291687,
476
+ "rewards/rejected": -3.531188488006592,
477
+ "step": 140
478
+ },
479
+ {
480
+ "epoch": 1.0507246376811594,
481
+ "grad_norm": 16.022443175608785,
482
+ "learning_rate": 7.488767608052628e-07,
483
+ "logits/chosen": -2.2735958099365234,
484
+ "logits/rejected": -2.199552059173584,
485
+ "logps/chosen": -190.21511840820312,
486
+ "logps/rejected": -237.59854125976562,
487
+ "loss": 0.171,
488
+ "rewards/accuracies": 0.9375,
489
+ "rewards/chosen": 0.7184330821037292,
490
+ "rewards/margins": 4.115548133850098,
491
+ "rewards/rejected": -3.3971149921417236,
492
+ "step": 145
493
+ },
494
+ {
495
+ "epoch": 1.0869565217391304,
496
+ "grad_norm": 16.37925141930124,
497
+ "learning_rate": 7.318317659926636e-07,
498
+ "logits/chosen": -2.250054121017456,
499
+ "logits/rejected": -2.221043109893799,
500
+ "logps/chosen": -174.0218048095703,
501
+ "logps/rejected": -237.14120483398438,
502
+ "loss": 0.1436,
503
+ "rewards/accuracies": 0.9375,
504
+ "rewards/chosen": 1.0139648914337158,
505
+ "rewards/margins": 4.169236660003662,
506
+ "rewards/rejected": -3.1552722454071045,
507
+ "step": 150
508
+ },
509
+ {
510
+ "epoch": 1.1231884057971016,
511
+ "grad_norm": 13.693805739113818,
512
+ "learning_rate": 7.144363460405189e-07,
513
+ "logits/chosen": -2.3314082622528076,
514
+ "logits/rejected": -2.2712883949279785,
515
+ "logps/chosen": -192.2017059326172,
516
+ "logps/rejected": -236.91586303710938,
517
+ "loss": 0.1415,
518
+ "rewards/accuracies": 0.9624999761581421,
519
+ "rewards/chosen": 1.1952037811279297,
520
+ "rewards/margins": 4.705626487731934,
521
+ "rewards/rejected": -3.510422945022583,
522
+ "step": 155
523
+ },
524
+ {
525
+ "epoch": 1.1594202898550725,
526
+ "grad_norm": 13.203700040695288,
527
+ "learning_rate": 6.967167949833762e-07,
528
+ "logits/chosen": -2.324735164642334,
529
+ "logits/rejected": -2.283231258392334,
530
+ "logps/chosen": -194.0807342529297,
531
+ "logps/rejected": -247.2218475341797,
532
+ "loss": 0.1459,
533
+ "rewards/accuracies": 0.949999988079071,
534
+ "rewards/chosen": 0.3761358857154846,
535
+ "rewards/margins": 4.9711785316467285,
536
+ "rewards/rejected": -4.595042705535889,
537
+ "step": 160
538
+ },
539
+ {
540
+ "epoch": 1.1594202898550725,
541
+ "eval_logits/chosen": -2.3007450103759766,
542
+ "eval_logits/rejected": -2.273284673690796,
543
+ "eval_logps/chosen": -204.34878540039062,
544
+ "eval_logps/rejected": -232.13629150390625,
545
+ "eval_loss": 0.42876046895980835,
546
+ "eval_rewards/accuracies": 0.8286290168762207,
547
+ "eval_rewards/chosen": -0.40293240547180176,
548
+ "eval_rewards/margins": 2.9898877143859863,
549
+ "eval_rewards/rejected": -3.392819881439209,
550
+ "eval_runtime": 247.3207,
551
+ "eval_samples_per_second": 15.858,
552
+ "eval_steps_per_second": 0.251,
553
+ "step": 160
554
+ },
555
+ {
556
+ "epoch": 1.1956521739130435,
557
+ "grad_norm": 17.287170066985027,
558
+ "learning_rate": 6.786998967959219e-07,
559
+ "logits/chosen": -2.313910961151123,
560
+ "logits/rejected": -2.274190902709961,
561
+ "logps/chosen": -200.9847869873047,
562
+ "logps/rejected": -228.14138793945312,
563
+ "loss": 0.1497,
564
+ "rewards/accuracies": 0.9375,
565
+ "rewards/chosen": 0.39603787660598755,
566
+ "rewards/margins": 4.507396697998047,
567
+ "rewards/rejected": -4.111359119415283,
568
+ "step": 165
569
+ },
570
+ {
571
+ "epoch": 1.2318840579710144,
572
+ "grad_norm": 16.43946047429659,
573
+ "learning_rate": 6.604128849076838e-07,
574
+ "logits/chosen": -2.340721607208252,
575
+ "logits/rejected": -2.305487871170044,
576
+ "logps/chosen": -200.94406127929688,
577
+ "logps/rejected": -238.47921752929688,
578
+ "loss": 0.1533,
579
+ "rewards/accuracies": 0.9437500238418579,
580
+ "rewards/chosen": 0.9461654424667358,
581
+ "rewards/margins": 4.6484904289245605,
582
+ "rewards/rejected": -3.702324390411377,
583
+ "step": 170
584
+ },
585
+ {
586
+ "epoch": 1.2681159420289856,
587
+ "grad_norm": 13.545354366232296,
588
+ "learning_rate": 6.418834010383609e-07,
589
+ "logits/chosen": -2.400474786758423,
590
+ "logits/rejected": -2.343231439590454,
591
+ "logps/chosen": -172.61148071289062,
592
+ "logps/rejected": -230.24197387695312,
593
+ "loss": 0.1459,
594
+ "rewards/accuracies": 0.9437500238418579,
595
+ "rewards/chosen": 0.5564432144165039,
596
+ "rewards/margins": 4.526711463928223,
597
+ "rewards/rejected": -3.9702675342559814,
598
+ "step": 175
599
+ },
600
+ {
601
+ "epoch": 1.3043478260869565,
602
+ "grad_norm": 16.208760712520707,
603
+ "learning_rate": 6.231394534160007e-07,
604
+ "logits/chosen": -2.3880228996276855,
605
+ "logits/rejected": -2.377924680709839,
606
+ "logps/chosen": -186.1154327392578,
607
+ "logps/rejected": -228.6842498779297,
608
+ "loss": 0.1408,
609
+ "rewards/accuracies": 0.96875,
610
+ "rewards/chosen": 1.147924542427063,
611
+ "rewards/margins": 4.681893348693848,
612
+ "rewards/rejected": -3.533968687057495,
613
+ "step": 180
614
+ },
615
+ {
616
+ "epoch": 1.3405797101449275,
617
+ "grad_norm": 16.391749704364813,
618
+ "learning_rate": 6.042093744411828e-07,
619
+ "logits/chosen": -2.3000502586364746,
620
+ "logits/rejected": -2.280015468597412,
621
+ "logps/chosen": -184.9687042236328,
622
+ "logps/rejected": -230.6714630126953,
623
+ "loss": 0.1426,
624
+ "rewards/accuracies": 0.9437500238418579,
625
+ "rewards/chosen": 1.1101362705230713,
626
+ "rewards/margins": 4.539022922515869,
627
+ "rewards/rejected": -3.4288864135742188,
628
+ "step": 185
629
+ },
630
+ {
631
+ "epoch": 1.3768115942028984,
632
+ "grad_norm": 15.103230138315158,
633
+ "learning_rate": 5.851217778611993e-07,
634
+ "logits/chosen": -2.2735018730163574,
635
+ "logits/rejected": -2.272218942642212,
636
+ "logps/chosen": -196.11097717285156,
637
+ "logps/rejected": -217.5700225830078,
638
+ "loss": 0.1351,
639
+ "rewards/accuracies": 0.925000011920929,
640
+ "rewards/chosen": 0.8358832597732544,
641
+ "rewards/margins": 4.623397350311279,
642
+ "rewards/rejected": -3.7875142097473145,
643
+ "step": 190
644
+ },
645
+ {
646
+ "epoch": 1.4130434782608696,
647
+ "grad_norm": 23.621795316466446,
648
+ "learning_rate": 5.659055155189651e-07,
649
+ "logits/chosen": -2.325028896331787,
650
+ "logits/rejected": -2.252711296081543,
651
+ "logps/chosen": -193.0414276123047,
652
+ "logps/rejected": -231.9776611328125,
653
+ "loss": 0.1542,
654
+ "rewards/accuracies": 0.9437500238418579,
655
+ "rewards/chosen": 0.06639621406793594,
656
+ "rewards/margins": 4.913178443908691,
657
+ "rewards/rejected": -4.8467817306518555,
658
+ "step": 195
659
+ },
660
+ {
661
+ "epoch": 1.4492753623188406,
662
+ "grad_norm": 11.913033785498357,
663
+ "learning_rate": 5.465896337420358e-07,
664
+ "logits/chosen": -2.3189964294433594,
665
+ "logits/rejected": -2.2462618350982666,
666
+ "logps/chosen": -207.55685424804688,
667
+ "logps/rejected": -269.5002746582031,
668
+ "loss": 0.1455,
669
+ "rewards/accuracies": 0.9437500238418579,
670
+ "rewards/chosen": 0.3675020635128021,
671
+ "rewards/margins": 5.308381080627441,
672
+ "rewards/rejected": -4.940878868103027,
673
+ "step": 200
674
+ },
675
+ {
676
+ "epoch": 1.4492753623188406,
677
+ "eval_logits/chosen": -2.2696547508239746,
678
+ "eval_logits/rejected": -2.246581554412842,
679
+ "eval_logps/chosen": -205.6576690673828,
680
+ "eval_logps/rejected": -234.53866577148438,
681
+ "eval_loss": 0.42552247643470764,
682
+ "eval_rewards/accuracies": 0.8165322542190552,
683
+ "eval_rewards/chosen": -0.5338226556777954,
684
+ "eval_rewards/margins": 3.099236011505127,
685
+ "eval_rewards/rejected": -3.633058547973633,
686
+ "eval_runtime": 247.2511,
687
+ "eval_samples_per_second": 15.862,
688
+ "eval_steps_per_second": 0.251,
689
+ "step": 200
690
+ },
691
+ {
692
+ "epoch": 1.4855072463768115,
693
+ "grad_norm": 21.298531736271027,
694
+ "learning_rate": 5.272033294376521e-07,
695
+ "logits/chosen": -2.2834343910217285,
696
+ "logits/rejected": -2.2502143383026123,
697
+ "logps/chosen": -191.12301635742188,
698
+ "logps/rejected": -221.69924926757812,
699
+ "loss": 0.1402,
700
+ "rewards/accuracies": 0.9375,
701
+ "rewards/chosen": 0.3291940987110138,
702
+ "rewards/margins": 4.7104597091674805,
703
+ "rewards/rejected": -4.381266117095947,
704
+ "step": 205
705
+ },
706
+ {
707
+ "epoch": 1.5217391304347827,
708
+ "grad_norm": 16.624769093346845,
709
+ "learning_rate": 5.077759059601755e-07,
710
+ "logits/chosen": -2.312807559967041,
711
+ "logits/rejected": -2.29304838180542,
712
+ "logps/chosen": -206.1201629638672,
713
+ "logps/rejected": -224.8920135498047,
714
+ "loss": 0.1564,
715
+ "rewards/accuracies": 0.949999988079071,
716
+ "rewards/chosen": 0.7266442775726318,
717
+ "rewards/margins": 5.301372528076172,
718
+ "rewards/rejected": -4.574727535247803,
719
+ "step": 210
720
+ },
721
+ {
722
+ "epoch": 1.5579710144927537,
723
+ "grad_norm": 17.710918361893075,
724
+ "learning_rate": 4.883367288176238e-07,
725
+ "logits/chosen": -2.325953483581543,
726
+ "logits/rejected": -2.355797290802002,
727
+ "logps/chosen": -178.1147918701172,
728
+ "logps/rejected": -228.8972930908203,
729
+ "loss": 0.1462,
730
+ "rewards/accuracies": 0.949999988079071,
731
+ "rewards/chosen": 1.0754575729370117,
732
+ "rewards/margins": 5.179568290710449,
733
+ "rewards/rejected": -4.104111194610596,
734
+ "step": 215
735
+ },
736
+ {
737
+ "epoch": 1.5942028985507246,
738
+ "grad_norm": 17.61185095540283,
739
+ "learning_rate": 4.6891518128425974e-07,
740
+ "logits/chosen": -2.3683056831359863,
741
+ "logits/rejected": -2.327507495880127,
742
+ "logps/chosen": -198.29257202148438,
743
+ "logps/rejected": -239.8541717529297,
744
+ "loss": 0.149,
745
+ "rewards/accuracies": 0.949999988079071,
746
+ "rewards/chosen": 1.2280995845794678,
747
+ "rewards/margins": 5.207882881164551,
748
+ "rewards/rejected": -3.979783535003662,
749
+ "step": 220
750
+ },
751
+ {
752
+ "epoch": 1.6304347826086958,
753
+ "grad_norm": 18.295935717153018,
754
+ "learning_rate": 4.495406199863217e-07,
755
+ "logits/chosen": -2.3297061920166016,
756
+ "logits/rejected": -2.3166041374206543,
757
+ "logps/chosen": -180.5458221435547,
758
+ "logps/rejected": -259.0296630859375,
759
+ "loss": 0.1258,
760
+ "rewards/accuracies": 0.956250011920929,
761
+ "rewards/chosen": 0.7601513266563416,
762
+ "rewards/margins": 5.343691825866699,
763
+ "rewards/rejected": -4.583540916442871,
764
+ "step": 225
765
+ },
766
+ {
767
+ "epoch": 1.6666666666666665,
768
+ "grad_norm": 20.56329538942416,
769
+ "learning_rate": 4.302423305280385e-07,
770
+ "logits/chosen": -2.342526912689209,
771
+ "logits/rejected": -2.293287992477417,
772
+ "logps/chosen": -181.458251953125,
773
+ "logps/rejected": -268.34442138671875,
774
+ "loss": 0.1301,
775
+ "rewards/accuracies": 0.9375,
776
+ "rewards/chosen": 0.30324190855026245,
777
+ "rewards/margins": 5.840424060821533,
778
+ "rewards/rejected": -5.537181854248047,
779
+ "step": 230
780
+ },
781
+ {
782
+ "epoch": 1.7028985507246377,
783
+ "grad_norm": 18.749044820633504,
784
+ "learning_rate": 4.1104948322499386e-07,
785
+ "logits/chosen": -2.36098575592041,
786
+ "logits/rejected": -2.310408353805542,
787
+ "logps/chosen": -187.14903259277344,
788
+ "logps/rejected": -245.0007781982422,
789
+ "loss": 0.1278,
790
+ "rewards/accuracies": 0.949999988079071,
791
+ "rewards/chosen": 0.39717116951942444,
792
+ "rewards/margins": 5.458386421203613,
793
+ "rewards/rejected": -5.061215400695801,
794
+ "step": 235
795
+ },
796
+ {
797
+ "epoch": 1.7391304347826086,
798
+ "grad_norm": 19.17022127722479,
799
+ "learning_rate": 3.919910890117584e-07,
800
+ "logits/chosen": -2.3718903064727783,
801
+ "logits/rejected": -2.348759889602661,
802
+ "logps/chosen": -180.43243408203125,
803
+ "logps/rejected": -232.767333984375,
804
+ "loss": 0.1358,
805
+ "rewards/accuracies": 0.925000011920929,
806
+ "rewards/chosen": 0.7176074981689453,
807
+ "rewards/margins": 5.331349849700928,
808
+ "rewards/rejected": -4.613741874694824,
809
+ "step": 240
810
+ },
811
+ {
812
+ "epoch": 1.7391304347826086,
813
+ "eval_logits/chosen": -2.3806025981903076,
814
+ "eval_logits/rejected": -2.360522508621216,
815
+ "eval_logps/chosen": -203.0333251953125,
816
+ "eval_logps/rejected": -234.9227294921875,
817
+ "eval_loss": 0.4247341454029083,
818
+ "eval_rewards/accuracies": 0.8326612710952759,
819
+ "eval_rewards/chosen": -0.27138766646385193,
820
+ "eval_rewards/margins": 3.4000766277313232,
821
+ "eval_rewards/rejected": -3.671464204788208,
822
+ "eval_runtime": 247.4296,
823
+ "eval_samples_per_second": 15.851,
824
+ "eval_steps_per_second": 0.251,
825
+ "step": 240
826
+ },
827
+ {
828
+ "epoch": 1.7753623188405796,
829
+ "grad_norm": 16.252609084074862,
830
+ "learning_rate": 3.7309595559042973e-07,
831
+ "logits/chosen": -2.3761582374572754,
832
+ "logits/rejected": -2.371837615966797,
833
+ "logps/chosen": -185.31297302246094,
834
+ "logps/rejected": -231.5406951904297,
835
+ "loss": 0.1311,
836
+ "rewards/accuracies": 0.9375,
837
+ "rewards/chosen": 0.5640401840209961,
838
+ "rewards/margins": 5.0947370529174805,
839
+ "rewards/rejected": -4.530695915222168,
840
+ "step": 245
841
+ },
842
+ {
843
+ "epoch": 1.8115942028985508,
844
+ "grad_norm": 20.868443403720576,
845
+ "learning_rate": 3.54392643886374e-07,
846
+ "logits/chosen": -2.4362902641296387,
847
+ "logits/rejected": -2.405050277709961,
848
+ "logps/chosen": -186.8355255126953,
849
+ "logps/rejected": -232.0409698486328,
850
+ "loss": 0.1252,
851
+ "rewards/accuracies": 0.9437500238418579,
852
+ "rewards/chosen": 0.699629545211792,
853
+ "rewards/margins": 5.528279781341553,
854
+ "rewards/rejected": -4.82865047454834,
855
+ "step": 250
856
+ },
857
+ {
858
+ "epoch": 1.8478260869565217,
859
+ "grad_norm": 20.235567534724474,
860
+ "learning_rate": 3.3590942487697765e-07,
861
+ "logits/chosen": -2.4567503929138184,
862
+ "logits/rejected": -2.444530725479126,
863
+ "logps/chosen": -186.48788452148438,
864
+ "logps/rejected": -223.74258422851562,
865
+ "loss": 0.1613,
866
+ "rewards/accuracies": 0.918749988079071,
867
+ "rewards/chosen": 0.8208659291267395,
868
+ "rewards/margins": 4.932476997375488,
869
+ "rewards/rejected": -4.111611366271973,
870
+ "step": 255
871
+ },
872
+ {
873
+ "epoch": 1.8840579710144927,
874
+ "grad_norm": 17.41545455778026,
875
+ "learning_rate": 3.176742368586725e-07,
876
+ "logits/chosen": -2.452131986618042,
877
+ "logits/rejected": -2.415022373199463,
878
+ "logps/chosen": -196.01910400390625,
879
+ "logps/rejected": -217.8632354736328,
880
+ "loss": 0.1348,
881
+ "rewards/accuracies": 0.925000011920929,
882
+ "rewards/chosen": 0.8574349284172058,
883
+ "rewards/margins": 5.437888145446777,
884
+ "rewards/rejected": -4.580452919006348,
885
+ "step": 260
886
+ },
887
+ {
888
+ "epoch": 1.9202898550724639,
889
+ "grad_norm": 16.095749631795933,
890
+ "learning_rate": 2.997146432168236e-07,
891
+ "logits/chosen": -2.44551420211792,
892
+ "logits/rejected": -2.410526990890503,
893
+ "logps/chosen": -192.61859130859375,
894
+ "logps/rejected": -246.4084014892578,
895
+ "loss": 0.1427,
896
+ "rewards/accuracies": 0.925000011920929,
897
+ "rewards/chosen": 0.6556909084320068,
898
+ "rewards/margins": 6.007452964782715,
899
+ "rewards/rejected": -5.351761817932129,
900
+ "step": 265
901
+ },
902
+ {
903
+ "epoch": 1.9565217391304348,
904
+ "grad_norm": 20.740869553463348,
905
+ "learning_rate": 2.8205779076231446e-07,
906
+ "logits/chosen": -2.430431842803955,
907
+ "logits/rejected": -2.4140846729278564,
908
+ "logps/chosen": -191.99880981445312,
909
+ "logps/rejected": -238.8526611328125,
910
+ "loss": 0.1257,
911
+ "rewards/accuracies": 0.949999988079071,
912
+ "rewards/chosen": 0.9606460332870483,
913
+ "rewards/margins": 5.507210731506348,
914
+ "rewards/rejected": -4.54656457901001,
915
+ "step": 270
916
+ },
917
+ {
918
+ "epoch": 1.9927536231884058,
919
+ "grad_norm": 19.416516281755985,
920
+ "learning_rate": 2.647303686978035e-07,
921
+ "logits/chosen": -2.3993430137634277,
922
+ "logits/rejected": -2.376300811767578,
923
+ "logps/chosen": -185.33132934570312,
924
+ "logps/rejected": -219.64968872070312,
925
+ "loss": 0.1492,
926
+ "rewards/accuracies": 0.925000011920929,
927
+ "rewards/chosen": 0.5704915523529053,
928
+ "rewards/margins": 4.9512529373168945,
929
+ "rewards/rejected": -4.38076114654541,
930
+ "step": 275
931
+ },
932
+ {
933
+ "epoch": 2.028985507246377,
934
+ "grad_norm": 8.699243855503047,
935
+ "learning_rate": 2.4775856827568014e-07,
936
+ "logits/chosen": -2.397235155105591,
937
+ "logits/rejected": -2.38557767868042,
938
+ "logps/chosen": -187.7579803466797,
939
+ "logps/rejected": -224.9508514404297,
940
+ "loss": 0.0938,
941
+ "rewards/accuracies": 0.949999988079071,
942
+ "rewards/chosen": 0.7930246591567993,
943
+ "rewards/margins": 5.4677629470825195,
944
+ "rewards/rejected": -4.674737930297852,
945
+ "step": 280
946
+ },
947
+ {
948
+ "epoch": 2.028985507246377,
949
+ "eval_logits/chosen": -2.3933310508728027,
950
+ "eval_logits/rejected": -2.372490167617798,
951
+ "eval_logps/chosen": -203.4556121826172,
952
+ "eval_logps/rejected": -235.21469116210938,
953
+ "eval_loss": 0.41276049613952637,
954
+ "eval_rewards/accuracies": 0.8266128897666931,
955
+ "eval_rewards/chosen": -0.313615620136261,
956
+ "eval_rewards/margins": 3.3870484828948975,
957
+ "eval_rewards/rejected": -3.7006635665893555,
958
+ "eval_runtime": 247.2901,
959
+ "eval_samples_per_second": 15.86,
960
+ "eval_steps_per_second": 0.251,
961
+ "step": 280
962
+ },
963
+ {
964
+ "epoch": 2.0652173913043477,
965
+ "grad_norm": 8.518008709307914,
966
+ "learning_rate": 2.3116804320869464e-07,
967
+ "logits/chosen": -2.3717570304870605,
968
+ "logits/rejected": -2.358806610107422,
969
+ "logps/chosen": -189.88809204101562,
970
+ "logps/rejected": -226.0210418701172,
971
+ "loss": 0.072,
972
+ "rewards/accuracies": 0.981249988079071,
973
+ "rewards/chosen": 0.5044176578521729,
974
+ "rewards/margins": 5.758059978485107,
975
+ "rewards/rejected": -5.253642559051514,
976
+ "step": 285
977
+ },
978
+ {
979
+ "epoch": 2.101449275362319,
980
+ "grad_norm": 10.070711388481515,
981
+ "learning_rate": 2.1498387089310865e-07,
982
+ "logits/chosen": -2.3519985675811768,
983
+ "logits/rejected": -2.361879348754883,
984
+ "logps/chosen": -195.57469177246094,
985
+ "logps/rejected": -251.92440795898438,
986
+ "loss": 0.0702,
987
+ "rewards/accuracies": 0.9624999761581421,
988
+ "rewards/chosen": 1.01186203956604,
989
+ "rewards/margins": 6.105987548828125,
990
+ "rewards/rejected": -5.094125747680664,
991
+ "step": 290
992
+ },
993
+ {
994
+ "epoch": 2.13768115942029,
995
+ "grad_norm": 9.472204652777094,
996
+ "learning_rate": 1.9923051450297336e-07,
997
+ "logits/chosen": -2.384852647781372,
998
+ "logits/rejected": -2.346426486968994,
999
+ "logps/chosen": -194.14443969726562,
1000
+ "logps/rejected": -229.8656463623047,
1001
+ "loss": 0.0725,
1002
+ "rewards/accuracies": 0.96875,
1003
+ "rewards/chosen": 1.0537793636322021,
1004
+ "rewards/margins": 6.0652875900268555,
1005
+ "rewards/rejected": -5.011508464813232,
1006
+ "step": 295
1007
+ },
1008
+ {
1009
+ "epoch": 2.1739130434782608,
1010
+ "grad_norm": 11.964654423069804,
1011
+ "learning_rate": 1.839317860128368e-07,
1012
+ "logits/chosen": -2.368323802947998,
1013
+ "logits/rejected": -2.3484463691711426,
1014
+ "logps/chosen": -190.7874298095703,
1015
+ "logps/rejected": -249.8258514404297,
1016
+ "loss": 0.0649,
1017
+ "rewards/accuracies": 0.9937499761581421,
1018
+ "rewards/chosen": 0.9586502909660339,
1019
+ "rewards/margins": 6.444998264312744,
1020
+ "rewards/rejected": -5.4863481521606445,
1021
+ "step": 300
1022
+ },
1023
+ {
1024
+ "epoch": 2.210144927536232,
1025
+ "grad_norm": 14.363847590302074,
1026
+ "learning_rate": 1.6911081020477176e-07,
1027
+ "logits/chosen": -2.350074291229248,
1028
+ "logits/rejected": -2.3347928524017334,
1029
+ "logps/chosen": -188.36700439453125,
1030
+ "logps/rejected": -253.87844848632812,
1031
+ "loss": 0.0638,
1032
+ "rewards/accuracies": 0.949999988079071,
1033
+ "rewards/chosen": 0.843727707862854,
1034
+ "rewards/margins": 6.143573760986328,
1035
+ "rewards/rejected": -5.299846649169922,
1036
+ "step": 305
1037
+ },
1038
+ {
1039
+ "epoch": 2.246376811594203,
1040
+ "grad_norm": 12.207090391342035,
1041
+ "learning_rate": 1.5478998971412666e-07,
1042
+ "logits/chosen": -2.3135275840759277,
1043
+ "logits/rejected": -2.302867889404297,
1044
+ "logps/chosen": -190.8241424560547,
1045
+ "logps/rejected": -256.7770080566406,
1046
+ "loss": 0.0644,
1047
+ "rewards/accuracies": 0.981249988079071,
1048
+ "rewards/chosen": 0.7945331931114197,
1049
+ "rewards/margins": 6.6735687255859375,
1050
+ "rewards/rejected": -5.879034996032715,
1051
+ "step": 310
1052
+ },
1053
+ {
1054
+ "epoch": 2.282608695652174,
1055
+ "grad_norm": 17.814039974196998,
1056
+ "learning_rate": 1.4099097116683873e-07,
1057
+ "logits/chosen": -2.3371047973632812,
1058
+ "logits/rejected": -2.318176746368408,
1059
+ "logps/chosen": -211.5010986328125,
1060
+ "logps/rejected": -276.4696350097656,
1061
+ "loss": 0.0609,
1062
+ "rewards/accuracies": 0.987500011920929,
1063
+ "rewards/chosen": 1.2731298208236694,
1064
+ "rewards/margins": 7.202207088470459,
1065
+ "rewards/rejected": -5.9290771484375,
1066
+ "step": 315
1067
+ },
1068
+ {
1069
+ "epoch": 2.318840579710145,
1070
+ "grad_norm": 12.87715616525982,
1071
+ "learning_rate": 1.2773461245949247e-07,
1072
+ "logits/chosen": -2.339965343475342,
1073
+ "logits/rejected": -2.3124070167541504,
1074
+ "logps/chosen": -203.6749725341797,
1075
+ "logps/rejected": -248.37149047851562,
1076
+ "loss": 0.0592,
1077
+ "rewards/accuracies": 0.9937499761581421,
1078
+ "rewards/chosen": 0.8933491706848145,
1079
+ "rewards/margins": 6.6305975914001465,
1080
+ "rewards/rejected": -5.737248420715332,
1081
+ "step": 320
1082
+ },
1083
+ {
1084
+ "epoch": 2.318840579710145,
1085
+ "eval_logits/chosen": -2.335758686065674,
1086
+ "eval_logits/rejected": -2.310899257659912,
1087
+ "eval_logps/chosen": -206.08692932128906,
1088
+ "eval_logps/rejected": -239.44287109375,
1089
+ "eval_loss": 0.4438334107398987,
1090
+ "eval_rewards/accuracies": 0.8165322542190552,
1091
+ "eval_rewards/chosen": -0.576747715473175,
1092
+ "eval_rewards/margins": 3.546731948852539,
1093
+ "eval_rewards/rejected": -4.123479843139648,
1094
+ "eval_runtime": 247.2469,
1095
+ "eval_samples_per_second": 15.863,
1096
+ "eval_steps_per_second": 0.251,
1097
+ "step": 320
1098
+ },
1099
+ {
1100
+ "epoch": 2.355072463768116,
1101
+ "grad_norm": 12.127468615559982,
1102
+ "learning_rate": 1.1504095123158014e-07,
1103
+ "logits/chosen": -2.3464112281799316,
1104
+ "logits/rejected": -2.3510231971740723,
1105
+ "logps/chosen": -194.9401397705078,
1106
+ "logps/rejected": -256.2391052246094,
1107
+ "loss": 0.0642,
1108
+ "rewards/accuracies": 0.981249988079071,
1109
+ "rewards/chosen": 0.7289390563964844,
1110
+ "rewards/margins": 6.520253658294678,
1111
+ "rewards/rejected": -5.791314125061035,
1112
+ "step": 325
1113
+ },
1114
+ {
1115
+ "epoch": 2.391304347826087,
1116
+ "grad_norm": 9.771341581861016,
1117
+ "learning_rate": 1.0292917457762323e-07,
1118
+ "logits/chosen": -2.326569080352783,
1119
+ "logits/rejected": -2.2930023670196533,
1120
+ "logps/chosen": -188.14108276367188,
1121
+ "logps/rejected": -246.42495727539062,
1122
+ "loss": 0.0575,
1123
+ "rewards/accuracies": 0.981249988079071,
1124
+ "rewards/chosen": 0.9616584777832031,
1125
+ "rewards/margins": 6.517239570617676,
1126
+ "rewards/rejected": -5.555581092834473,
1127
+ "step": 330
1128
+ },
1129
+ {
1130
+ "epoch": 2.427536231884058,
1131
+ "grad_norm": 14.867872053501438,
1132
+ "learning_rate": 9.141759004493282e-08,
1133
+ "logits/chosen": -2.304109573364258,
1134
+ "logits/rejected": -2.3050553798675537,
1135
+ "logps/chosen": -179.35818481445312,
1136
+ "logps/rejected": -243.79220581054688,
1137
+ "loss": 0.0539,
1138
+ "rewards/accuracies": 0.9937499761581421,
1139
+ "rewards/chosen": 0.44949406385421753,
1140
+ "rewards/margins": 6.40158224105835,
1141
+ "rewards/rejected": -5.952088832855225,
1142
+ "step": 335
1143
+ },
1144
+ {
1145
+ "epoch": 2.463768115942029,
1146
+ "grad_norm": 15.245309497391487,
1147
+ "learning_rate": 8.052359796084951e-08,
1148
+ "logits/chosen": -2.3356070518493652,
1149
+ "logits/rejected": -2.2882630825042725,
1150
+ "logps/chosen": -191.533447265625,
1151
+ "logps/rejected": -248.5685577392578,
1152
+ "loss": 0.0649,
1153
+ "rewards/accuracies": 0.987500011920929,
1154
+ "rewards/chosen": 1.079332947731018,
1155
+ "rewards/margins": 6.897538185119629,
1156
+ "rewards/rejected": -5.818203926086426,
1157
+ "step": 340
1158
+ },
1159
+ {
1160
+ "epoch": 2.5,
1161
+ "grad_norm": 15.656947068033539,
1162
+ "learning_rate": 7.026366513129139e-08,
1163
+ "logits/chosen": -2.320748805999756,
1164
+ "logits/rejected": -2.2670602798461914,
1165
+ "logps/chosen": -184.66661071777344,
1166
+ "logps/rejected": -229.6528778076172,
1167
+ "loss": 0.0628,
1168
+ "rewards/accuracies": 0.987500011920929,
1169
+ "rewards/chosen": 0.8999651670455933,
1170
+ "rewards/margins": 6.056799411773682,
1171
+ "rewards/rejected": -5.156834125518799,
1172
+ "step": 345
1173
+ },
1174
+ {
1175
+ "epoch": 2.536231884057971,
1176
+ "grad_norm": 10.866006807463382,
1177
+ "learning_rate": 6.065329995036572e-08,
1178
+ "logits/chosen": -2.352247953414917,
1179
+ "logits/rejected": -2.3084654808044434,
1180
+ "logps/chosen": -191.09317016601562,
1181
+ "logps/rejected": -235.8540496826172,
1182
+ "loss": 0.0577,
1183
+ "rewards/accuracies": 0.987500011920929,
1184
+ "rewards/chosen": 0.519118070602417,
1185
+ "rewards/margins": 5.989134311676025,
1186
+ "rewards/rejected": -5.470016002655029,
1187
+ "step": 350
1188
+ },
1189
+ {
1190
+ "epoch": 2.572463768115942,
1191
+ "grad_norm": 13.773720162250582,
1192
+ "learning_rate": 5.170702895866591e-08,
1193
+ "logits/chosen": -2.3459525108337402,
1194
+ "logits/rejected": -2.2808985710144043,
1195
+ "logps/chosen": -181.71859741210938,
1196
+ "logps/rejected": -234.2205352783203,
1197
+ "loss": 0.0527,
1198
+ "rewards/accuracies": 0.9937499761581421,
1199
+ "rewards/chosen": 0.7595393061637878,
1200
+ "rewards/margins": 6.467637062072754,
1201
+ "rewards/rejected": -5.708098411560059,
1202
+ "step": 355
1203
+ },
1204
+ {
1205
+ "epoch": 2.608695652173913,
1206
+ "grad_norm": 17.050512305140668,
1207
+ "learning_rate": 4.343837488569057e-08,
1208
+ "logits/chosen": -2.3642101287841797,
1209
+ "logits/rejected": -2.334510087966919,
1210
+ "logps/chosen": -188.16355895996094,
1211
+ "logps/rejected": -243.60531616210938,
1212
+ "loss": 0.0673,
1213
+ "rewards/accuracies": 0.9750000238418579,
1214
+ "rewards/chosen": 0.8965447545051575,
1215
+ "rewards/margins": 6.54379940032959,
1216
+ "rewards/rejected": -5.647254943847656,
1217
+ "step": 360
1218
+ },
1219
+ {
1220
+ "epoch": 2.608695652173913,
1221
+ "eval_logits/chosen": -2.3496623039245605,
1222
+ "eval_logits/rejected": -2.3254337310791016,
1223
+ "eval_logps/chosen": -206.58370971679688,
1224
+ "eval_logps/rejected": -241.21258544921875,
1225
+ "eval_loss": 0.455331414937973,
1226
+ "eval_rewards/accuracies": 0.8205645084381104,
1227
+ "eval_rewards/chosen": -0.6264265775680542,
1228
+ "eval_rewards/margins": 3.674025774002075,
1229
+ "eval_rewards/rejected": -4.300451755523682,
1230
+ "eval_runtime": 247.544,
1231
+ "eval_samples_per_second": 15.844,
1232
+ "eval_steps_per_second": 0.25,
1233
+ "step": 360
1234
+ },
1235
+ {
1236
+ "epoch": 2.644927536231884,
1237
+ "grad_norm": 13.189480623721069,
1238
+ "learning_rate": 3.585983620957112e-08,
1239
+ "logits/chosen": -2.361184597015381,
1240
+ "logits/rejected": -2.3199973106384277,
1241
+ "logps/chosen": -183.58596801757812,
1242
+ "logps/rejected": -238.3034210205078,
1243
+ "loss": 0.0632,
1244
+ "rewards/accuracies": 0.9937499761581421,
1245
+ "rewards/chosen": 0.6551353931427002,
1246
+ "rewards/margins": 6.403916358947754,
1247
+ "rewards/rejected": -5.748780250549316,
1248
+ "step": 365
1249
+ },
1250
+ {
1251
+ "epoch": 2.681159420289855,
1252
+ "grad_norm": 10.993397358198846,
1253
+ "learning_rate": 2.8982868265005454e-08,
1254
+ "logits/chosen": -2.3595023155212402,
1255
+ "logits/rejected": -2.3268685340881348,
1256
+ "logps/chosen": -189.55947875976562,
1257
+ "logps/rejected": -237.3144073486328,
1258
+ "loss": 0.0587,
1259
+ "rewards/accuracies": 0.987500011920929,
1260
+ "rewards/chosen": 0.8602153658866882,
1261
+ "rewards/margins": 6.659050941467285,
1262
+ "rewards/rejected": -5.798834800720215,
1263
+ "step": 370
1264
+ },
1265
+ {
1266
+ "epoch": 2.717391304347826,
1267
+ "grad_norm": 13.4301289511478,
1268
+ "learning_rate": 2.2817865927956092e-08,
1269
+ "logits/chosen": -2.3729586601257324,
1270
+ "logits/rejected": -2.333218574523926,
1271
+ "logps/chosen": -182.43051147460938,
1272
+ "logps/rejected": -235.4917449951172,
1273
+ "loss": 0.0602,
1274
+ "rewards/accuracies": 0.981249988079071,
1275
+ "rewards/chosen": 0.953597903251648,
1276
+ "rewards/margins": 6.269543647766113,
1277
+ "rewards/rejected": -5.315945148468018,
1278
+ "step": 375
1279
+ },
1280
+ {
1281
+ "epoch": 2.753623188405797,
1282
+ "grad_norm": 13.712934546586954,
1283
+ "learning_rate": 1.7374147903282176e-08,
1284
+ "logits/chosen": -2.352264881134033,
1285
+ "logits/rejected": -2.317417860031128,
1286
+ "logps/chosen": -196.2283477783203,
1287
+ "logps/rejected": -243.51541137695312,
1288
+ "loss": 0.0483,
1289
+ "rewards/accuracies": 0.987500011920929,
1290
+ "rewards/chosen": 0.6484888792037964,
1291
+ "rewards/margins": 6.524939060211182,
1292
+ "rewards/rejected": -5.876450538635254,
1293
+ "step": 380
1294
+ },
1295
+ {
1296
+ "epoch": 2.789855072463768,
1297
+ "grad_norm": 7.887220453233511,
1298
+ "learning_rate": 1.2659942639057952e-08,
1299
+ "logits/chosen": -2.3527908325195312,
1300
+ "logits/rejected": -2.324645757675171,
1301
+ "logps/chosen": -197.2644500732422,
1302
+ "logps/rejected": -250.51913452148438,
1303
+ "loss": 0.0557,
1304
+ "rewards/accuracies": 0.987500011920929,
1305
+ "rewards/chosen": 0.7034837603569031,
1306
+ "rewards/margins": 6.565079689025879,
1307
+ "rewards/rejected": -5.861596584320068,
1308
+ "step": 385
1309
+ },
1310
+ {
1311
+ "epoch": 2.8260869565217392,
1312
+ "grad_norm": 10.716157650003963,
1313
+ "learning_rate": 8.682375888868166e-09,
1314
+ "logits/chosen": -2.352689027786255,
1315
+ "logits/rejected": -2.3404457569122314,
1316
+ "logps/chosen": -190.28585815429688,
1317
+ "logps/rejected": -256.22747802734375,
1318
+ "loss": 0.0505,
1319
+ "rewards/accuracies": 0.96875,
1320
+ "rewards/chosen": 0.4607642590999603,
1321
+ "rewards/margins": 6.127005577087402,
1322
+ "rewards/rejected": -5.666241645812988,
1323
+ "step": 390
1324
+ },
1325
+ {
1326
+ "epoch": 2.86231884057971,
1327
+ "grad_norm": 10.578934340288717,
1328
+ "learning_rate": 5.447459940880084e-09,
1329
+ "logits/chosen": -2.3586277961730957,
1330
+ "logits/rejected": -2.3280324935913086,
1331
+ "logps/chosen": -183.1387481689453,
1332
+ "logps/rejected": -248.5103759765625,
1333
+ "loss": 0.0541,
1334
+ "rewards/accuracies": 0.96875,
1335
+ "rewards/chosen": 0.48908624053001404,
1336
+ "rewards/margins": 6.304218292236328,
1337
+ "rewards/rejected": -5.815131664276123,
1338
+ "step": 395
1339
+ },
1340
+ {
1341
+ "epoch": 2.898550724637681,
1342
+ "grad_norm": 13.682225413217974,
1343
+ "learning_rate": 2.9600845299737053e-09,
1344
+ "logits/chosen": -2.340261936187744,
1345
+ "logits/rejected": -2.314548969268799,
1346
+ "logps/chosen": -176.70303344726562,
1347
+ "logps/rejected": -237.1485595703125,
1348
+ "loss": 0.0728,
1349
+ "rewards/accuracies": 0.9624999761581421,
1350
+ "rewards/chosen": 0.3482286036014557,
1351
+ "rewards/margins": 6.158910274505615,
1352
+ "rewards/rejected": -5.810681343078613,
1353
+ "step": 400
1354
+ },
1355
+ {
1356
+ "epoch": 2.898550724637681,
1357
+ "eval_logits/chosen": -2.3491718769073486,
1358
+ "eval_logits/rejected": -2.3246846199035645,
1359
+ "eval_logps/chosen": -207.1744384765625,
1360
+ "eval_logps/rejected": -242.15025329589844,
1361
+ "eval_loss": 0.45203983783721924,
1362
+ "eval_rewards/accuracies": 0.8185483813285828,
1363
+ "eval_rewards/chosen": -0.6855015158653259,
1364
+ "eval_rewards/margins": 3.708717107772827,
1365
+ "eval_rewards/rejected": -4.394218921661377,
1366
+ "eval_runtime": 247.4572,
1367
+ "eval_samples_per_second": 15.849,
1368
+ "eval_steps_per_second": 0.251,
1369
+ "step": 400
1370
+ },
1371
+ {
1372
+ "epoch": 2.9347826086956523,
1373
+ "grad_norm": 12.30646518010628,
1374
+ "learning_rate": 1.2240094466668404e-09,
1375
+ "logits/chosen": -2.3764843940734863,
1376
+ "logits/rejected": -2.30718994140625,
1377
+ "logps/chosen": -190.95550537109375,
1378
+ "logps/rejected": -267.6639709472656,
1379
+ "loss": 0.0602,
1380
+ "rewards/accuracies": 0.981249988079071,
1381
+ "rewards/chosen": 0.5119168162345886,
1382
+ "rewards/margins": 6.99010705947876,
1383
+ "rewards/rejected": -6.478189945220947,
1384
+ "step": 405
1385
+ },
1386
+ {
1387
+ "epoch": 2.971014492753623,
1388
+ "grad_norm": 17.986293235426054,
1389
+ "learning_rate": 2.418588540059607e-10,
1390
+ "logits/chosen": -2.359415292739868,
1391
+ "logits/rejected": -2.345515489578247,
1392
+ "logps/chosen": -186.134765625,
1393
+ "logps/rejected": -237.69613647460938,
1394
+ "loss": 0.058,
1395
+ "rewards/accuracies": 0.9937499761581421,
1396
+ "rewards/chosen": 0.9132804870605469,
1397
+ "rewards/margins": 6.650979518890381,
1398
+ "rewards/rejected": -5.737699031829834,
1399
+ "step": 410
1400
+ },
1401
+ {
1402
+ "epoch": 3.0,
1403
+ "step": 414,
1404
+ "total_flos": 4881795388538880.0,
1405
+ "train_loss": 0.21833302825689316,
1406
+ "train_runtime": 16364.2441,
1407
+ "train_samples_per_second": 6.47,
1408
+ "train_steps_per_second": 0.025
1409
+ }
1410
+ ],
1411
+ "logging_steps": 5,
1412
+ "max_steps": 414,
1413
+ "num_input_tokens_seen": 0,
1414
+ "num_train_epochs": 3,
1415
+ "save_steps": 40,
1416
+ "stateful_callbacks": {
1417
+ "TrainerControl": {
1418
+ "args": {
1419
+ "should_epoch_stop": false,
1420
+ "should_evaluate": false,
1421
+ "should_log": false,
1422
+ "should_save": true,
1423
+ "should_training_stop": true
1424
+ },
1425
+ "attributes": {}
1426
+ }
1427
+ },
1428
+ "total_flos": 4881795388538880.0,
1429
+ "train_batch_size": 8,
1430
+ "trial_name": null,
1431
+ "trial_params": null
1432
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:af79ff24bae38840a3c8efe3d28d0cc2a77ca640996f1dd8521f5747a2625682
3
+ size 7096
training_eval_loss.png ADDED
training_loss.png ADDED
training_rewards_accuracies.png ADDED
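
For reference, a minimal sketch of how the `trainer_state.json` added above can be re-read to reproduce the curves exported as `training_loss.png` and `training_eval_loss.png` (assuming the standard `transformers` Trainer layout shown in the diff; the local file path and the matplotlib plotting calls are illustrative, not part of the commit):

```python
# Sketch: re-plot train/eval loss from the log_history in trainer_state.json.
# Assumes the key layout visible in the diff above ("loss" on training logs,
# "eval_loss" on evaluation logs, both keyed by "step").
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:          # path is illustrative
    state = json.load(f)

train_steps, train_loss = [], []
eval_steps, eval_loss = [], []
for entry in state["log_history"]:
    if "loss" in entry:                        # per-step training log
        train_steps.append(entry["step"])
        train_loss.append(entry["loss"])
    if "eval_loss" in entry:                   # periodic evaluation log
        eval_steps.append(entry["step"])
        eval_loss.append(entry["eval_loss"])

plt.plot(train_steps, train_loss, label="train loss")
plt.plot(eval_steps, eval_loss, label="eval loss", marker="o")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curves.png")
```

The same loop can be pointed at `rewards/accuracies` and `eval_rewards/accuracies` to rebuild a plot along the lines of `training_rewards_accuracies.png`.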