Upload folder using huggingface_hub
- README.md +80 -0
- added_tokens.json +4 -0
- all_results.json +20 -0
- config.json +68 -0
- eval_results.json +15 -0
- generation_config.json +6 -0
- model-00001-of-00004.safetensors +3 -0
- model-00002-of-00004.safetensors +3 -0
- model-00003-of-00004.safetensors +3 -0
- model-00004-of-00004.safetensors +3 -0
- model.safetensors.index.json +694 -0
- preprocessor_config.json +52 -0
- special_tokens_map.json +30 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer_config.json +70 -0
- train_results.json +8 -0
- trainer_log.jsonl +87 -0
- trainer_state.json +1339 -0
- training_args.bin +3 -0
- training_eval_loss.png +0 -0
- training_loss.png +0 -0
- training_rewards_accuracies.png +0 -0
README.md
ADDED
@@ -0,0 +1,80 @@
---
library_name: transformers
license: other
base_model: llava-hf/llava-v1.6-mistral-7b-hf
tags:
- llama-factory
- full
- generated_from_trainer
model-index:
- name: AA_preference_cocour_new_step10_0_100
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# AA_preference_cocour_new_step10_0_100

This model is a fine-tuned version of [llava-hf/llava-v1.6-mistral-7b-hf](https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf) on the AA_preference_cocour_new_step10_0_100 dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4957
- Rewards/chosen: -0.4320
- Rewards/rejected: -3.0552
- Rewards/accuracies: 0.7917
- Rewards/margins: 2.6232
- Logps/rejected: -248.9210
- Logps/chosen: -252.8571
- Logits/rejected: -2.2740
- Logits/chosen: -2.3049

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-06
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- distributed_type: multi-GPU
- num_devices: 8
- gradient_accumulation_steps: 4
- total_train_batch_size: 256
- total_eval_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 10
- num_epochs: 3.0

### Training results

| Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
|:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
| 0.577 | 0.3738 | 50 | 0.5782 | 0.7960 | -0.1800 | 0.7250 | 0.9759 | -220.1687 | -240.5771 | -2.1546 | -2.1689 |
| 0.5388 | 0.7477 | 100 | 0.5391 | -0.4398 | -2.0133 | 0.7479 | 1.5735 | -238.5014 | -252.9343 | -2.1740 | -2.1991 |
| 0.2653 | 1.1215 | 150 | 0.5247 | 0.2862 | -1.6846 | 0.7646 | 1.9708 | -235.2147 | -245.6745 | -2.3266 | -2.3485 |
| 0.2571 | 1.4953 | 200 | 0.5108 | -0.5979 | -3.0808 | 0.7792 | 2.4828 | -249.1766 | -254.5160 | -2.4752 | -2.5016 |
| 0.2803 | 1.8692 | 250 | 0.4817 | -0.2909 | -2.6866 | 0.7854 | 2.3957 | -245.2348 | -251.4460 | -2.3853 | -2.4107 |
| 0.1739 | 2.2430 | 300 | 0.4912 | -0.3815 | -2.8477 | 0.7917 | 2.4662 | -246.8459 | -252.3520 | -2.3281 | -2.3560 |
| 0.1631 | 2.6168 | 350 | 0.4965 | -0.4101 | -3.0083 | 0.7896 | 2.5982 | -248.4518 | -252.6378 | -2.2784 | -2.3092 |


### Framework versions

- Transformers 4.45.2
- Pytorch 2.4.0+cu121
- Datasets 2.21.0
- Tokenizers 0.20.3
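The auto-generated card stops short of a usage snippet. Below is a minimal inference sketch, assuming the standard LlavaNext API in Transformers 4.45; the hosting repo id is not named in this commit, so `"your-org/AA_preference_cocour_new_step10_0_100"` is a placeholder.

```python
# Minimal inference sketch for a LLaVA-NeXT (llava-v1.6-mistral-7b) checkpoint.
# The repo id below is a placeholder, not confirmed by this commit.
import torch
from PIL import Image
from transformers import LlavaNextProcessor, LlavaNextForConditionalGeneration

repo = "your-org/AA_preference_cocour_new_step10_0_100"  # placeholder
processor = LlavaNextProcessor.from_pretrained(repo)
model = LlavaNextForConditionalGeneration.from_pretrained(
    repo, torch_dtype=torch.bfloat16, device_map="auto"
)

image = Image.open("example.jpg")
# Mistral instruct format used by the llava-v1.6-mistral-7b-hf base model.
prompt = "[INST] <image>\nDescribe this image. [/INST]"
inputs = processor(images=image, text=prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=128)
print(processor.decode(output[0], skip_special_tokens=True))
```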
added_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "<image>": 32000,
  "<pad>": 32001
}
all_results.json
ADDED
@@ -0,0 +1,20 @@
{
  "epoch": 2.983177570093458,
  "eval_logits/chosen": -2.3048903942108154,
  "eval_logits/rejected": -2.273998737335205,
  "eval_logps/chosen": -252.85711669921875,
  "eval_logps/rejected": -248.92103576660156,
  "eval_loss": 0.4957020878791809,
  "eval_rewards/accuracies": 0.7916666865348816,
  "eval_rewards/chosen": -0.43204134702682495,
  "eval_rewards/margins": 2.6231741905212402,
  "eval_rewards/rejected": -3.05521559715271,
  "eval_runtime": 251.7994,
  "eval_samples_per_second": 15.087,
  "eval_steps_per_second": 0.238,
  "total_flos": 4704901791744000.0,
  "train_loss": 0.3356622438084213,
  "train_runtime": 15341.8197,
  "train_samples_per_second": 6.686,
  "train_steps_per_second": 0.026
}
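These are DPO-style preference metrics: `eval_rewards/margins` is the chosen reward minus the rejected reward, and the file's own numbers bear that out to within float32 logging precision. A quick sanity check:

```python
# Verify the reported DPO margin equals chosen - rejected rewards.
import json

with open("all_results.json") as f:
    results = json.load(f)

margin = results["eval_rewards/chosen"] - results["eval_rewards/rejected"]
# -0.43204134702682495 - (-3.05521559715271) = 2.62317425... in float64,
# matching the logged eval_rewards/margins (2.6231741905212402) to ~1e-7.
print(margin, results["eval_rewards/margins"])
```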
config.json
ADDED
@@ -0,0 +1,68 @@
{
  "_name_or_path": "/data/align-anything/hantao/models/llava-v1.6-mistral-7b-hf",
  "architectures": [
    "LlavaNextForConditionalGeneration"
  ],
  "hidden_size": 4096,
  "ignore_index": -100,
  "image_grid_pinpoints": [
    [
      336,
      672
    ],
    [
      672,
      336
    ],
    [
      672,
      672
    ],
    [
      1008,
      336
    ],
    [
      336,
      1008
    ]
  ],
  "image_seq_length": 576,
  "image_token_index": 32000,
  "model_type": "llava_next",
  "projector_hidden_act": "gelu",
  "text_config": {
    "_name_or_path": "mistralai/Mistral-7B-Instruct-v0.2",
    "architectures": [
      "MistralForCausalLM"
    ],
    "intermediate_size": 14336,
    "max_position_embeddings": 32768,
    "model_type": "mistral",
    "num_key_value_heads": 8,
    "rms_norm_eps": 1e-05,
    "rope_theta": 1000000.0,
    "sliding_window": null,
    "torch_dtype": "bfloat16",
    "vocab_size": 32064
  },
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.45.2",
  "use_cache": false,
  "use_image_newline_parameter": true,
  "vision_config": {
    "hidden_size": 1024,
    "image_size": 336,
    "intermediate_size": 4096,
    "model_type": "clip_vision_model",
    "num_attention_heads": 16,
    "num_hidden_layers": 24,
    "patch_size": 14,
    "projection_dim": 768,
    "vocab_size": 32000
  },
  "vision_feature_layer": -2,
  "vision_feature_select_strategy": "default",
  "vocab_size": 32064
}
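The config nests a Mistral-7B-Instruct-v0.2 text backbone and a CLIP ViT vision tower inside a `llava_next` wrapper. A sketch for inspecting the composite config without downloading weights (the repo id is again a placeholder):

```python
# Sketch: inspect the nested LlavaNext config. Repo id is a placeholder.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("your-org/AA_preference_cocour_new_step10_0_100")
print(config.model_type)                # "llava_next"
print(config.text_config.model_type)    # "mistral" (7B language backbone)
print(config.vision_config.model_type)  # "clip_vision_model" (336px, patch 14)
print(config.image_grid_pinpoints)      # anyres tiling resolutions
```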
eval_results.json
ADDED
@@ -0,0 +1,15 @@
{
  "epoch": 2.983177570093458,
  "eval_logits/chosen": -2.3048903942108154,
  "eval_logits/rejected": -2.273998737335205,
  "eval_logps/chosen": -252.85711669921875,
  "eval_logps/rejected": -248.92103576660156,
  "eval_loss": 0.4957020878791809,
  "eval_rewards/accuracies": 0.7916666865348816,
  "eval_rewards/chosen": -0.43204134702682495,
  "eval_rewards/margins": 2.6231741905212402,
  "eval_rewards/rejected": -3.05521559715271,
  "eval_runtime": 251.7994,
  "eval_samples_per_second": 15.087,
  "eval_steps_per_second": 0.238
}
generation_config.json
ADDED
@@ -0,0 +1,6 @@
{
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "transformers_version": "4.45.2"
}
model-00001-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ccbd31ae05a79cb1ca71b2f550691e43fd0ba159ebf0490aa71cecaf4de82f23
size 4921618624
model-00002-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c1744f2b8b28f28b2303e2e710a144efdcbcf195c810b1f04c9ec7eb380c2778
size 4915917672
model-00003-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:910c8dc802c84612eae3f61e3ca88c5045535ed32f3873bbc37c1ef31d88de47
size 4915917680
model-00004-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:70974e565d2e5a91414a0431daa7c3f81ec0ee3d31581f4e55fd5a15d4fb5131
size 380134008
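The four `*.safetensors` entries above are Git LFS pointers (version, oid, size), not the weights themselves; the shard index that follows maps each parameter name to one of the four shards, which is how `from_pretrained` opens only the files it needs. A sketch of that lookup done by hand, assuming the index and shards have been downloaded to `model_dir`:

```python
# Sketch: resolve one tensor through the shard index by hand.
import json
from safetensors import safe_open

model_dir = "."  # directory holding the index and the four shards (assumption)
with open(f"{model_dir}/model.safetensors.index.json") as f:
    index = json.load(f)

name = "language_model.lm_head.weight"
shard = index["weight_map"][name]  # -> "model-00004-of-00004.safetensors"
with safe_open(f"{model_dir}/{shard}", framework="pt") as f:
    tensor = f.get_tensor(name)
print(shard, tuple(tensor.shape))
```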
model.safetensors.index.json
ADDED
@@ -0,0 +1,694 @@
{
  "metadata": {
    "total_size": 15133495296
  },
  "weight_map": {
    "image_newline": "model-00001-of-00004.safetensors",
    "language_model.lm_head.weight": "model-00004-of-00004.safetensors",
    "language_model.model.embed_tokens.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors",
    "language_model.model.layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
    "language_model.model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.31.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
    "language_model.model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.9.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.norm.weight": "model-00004-of-00004.safetensors",
    "multi_modal_projector.linear_1.bias": "model-00001-of-00004.safetensors",
    "multi_modal_projector.linear_1.weight": "model-00001-of-00004.safetensors",
    "multi_modal_projector.linear_2.bias": "model-00001-of-00004.safetensors",
    "multi_modal_projector.linear_2.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.embeddings.class_embedding": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.embeddings.position_embedding.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00001-of-00004.safetensors",
    "vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
458 |
+
"vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
459 |
+
"vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
460 |
+
"vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
461 |
+
"vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
462 |
+
"vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
463 |
+
"vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
464 |
+
"vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
465 |
+
"vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
466 |
+
"vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
467 |
+
"vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
468 |
+
"vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
469 |
+
"vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
470 |
+
"vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
471 |
+
"vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
472 |
+
"vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
473 |
+
"vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
474 |
+
"vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
475 |
+
"vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
476 |
+
"vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
477 |
+
"vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
478 |
+
"vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
479 |
+
"vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
480 |
+
"vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
481 |
+
"vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
482 |
+
"vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
483 |
+
"vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
484 |
+
"vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
485 |
+
"vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
486 |
+
"vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
487 |
+
"vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
488 |
+
"vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
489 |
+
"vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
490 |
+
"vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
491 |
+
"vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
492 |
+
"vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
493 |
+
"vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
494 |
+
"vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
495 |
+
"vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
496 |
+
"vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
497 |
+
"vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
498 |
+
"vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
499 |
+
"vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
500 |
+
"vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
501 |
+
"vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
502 |
+
"vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
503 |
+
"vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
504 |
+
"vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
505 |
+
"vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
506 |
+
"vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
507 |
+
"vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
508 |
+
"vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
509 |
+
"vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
510 |
+
"vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
511 |
+
"vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
512 |
+
"vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
513 |
+
"vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
514 |
+
"vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
515 |
+
"vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
516 |
+
"vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
517 |
+
"vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
518 |
+
"vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
519 |
+
"vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
520 |
+
"vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
521 |
+
"vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
522 |
+
"vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
523 |
+
"vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
524 |
+
"vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
525 |
+
"vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
526 |
+
"vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
527 |
+
"vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
528 |
+
"vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
529 |
+
"vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
530 |
+
"vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
531 |
+
"vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
532 |
+
"vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
533 |
+
"vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
534 |
+
"vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
535 |
+
"vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
536 |
+
"vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
537 |
+
"vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
538 |
+
"vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
539 |
+
"vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
540 |
+
"vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
541 |
+
"vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
542 |
+
"vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
543 |
+
"vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
544 |
+
"vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
545 |
+
"vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
546 |
+
"vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
547 |
+
"vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
548 |
+
"vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
549 |
+
"vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
550 |
+
"vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
551 |
+
"vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
552 |
+
"vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
553 |
+
"vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
554 |
+
"vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
555 |
+
"vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
556 |
+
"vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
557 |
+
"vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
558 |
+
"vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
559 |
+
"vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
560 |
+
"vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
561 |
+
"vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
562 |
+
"vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
563 |
+
"vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
564 |
+
"vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
565 |
+
"vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
566 |
+
"vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
567 |
+
"vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
568 |
+
"vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
569 |
+
"vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
570 |
+
"vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
571 |
+
"vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
572 |
+
"vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
573 |
+
"vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
574 |
+
"vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
575 |
+
"vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
576 |
+
"vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
577 |
+
"vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
578 |
+
"vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
579 |
+
"vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
580 |
+
"vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
581 |
+
"vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
582 |
+
"vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
583 |
+
"vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
584 |
+
"vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
585 |
+
"vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
586 |
+
"vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
587 |
+
"vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
588 |
+
"vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
589 |
+
"vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
590 |
+
"vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
591 |
+
"vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
592 |
+
"vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
593 |
+
"vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
594 |
+
"vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
595 |
+
"vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
596 |
+
"vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
597 |
+
"vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
598 |
+
"vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
599 |
+
"vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
600 |
+
"vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
601 |
+
"vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
602 |
+
"vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
603 |
+
"vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
604 |
+
"vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
605 |
+
"vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
606 |
+
"vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
607 |
+
"vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
608 |
+
"vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
609 |
+
"vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
610 |
+
"vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
611 |
+
"vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
612 |
+
"vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
613 |
+
"vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
614 |
+
"vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
615 |
+
"vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
616 |
+
"vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
617 |
+
"vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
618 |
+
"vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
619 |
+
"vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
620 |
+
"vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
621 |
+
"vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
622 |
+
"vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
623 |
+
"vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
624 |
+
"vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
625 |
+
"vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
626 |
+
"vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
627 |
+
"vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
628 |
+
"vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
629 |
+
"vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
630 |
+
"vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
631 |
+
"vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
632 |
+
"vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
633 |
+
"vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
634 |
+
"vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
635 |
+
"vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
636 |
+
"vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
637 |
+
"vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
638 |
+
"vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
639 |
+
"vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
640 |
+
"vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
641 |
+
"vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
642 |
+
"vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
643 |
+
"vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
644 |
+
"vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
645 |
+
"vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
646 |
+
"vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
647 |
+
"vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
648 |
+
"vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
649 |
+
"vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
650 |
+
"vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
651 |
+
"vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
652 |
+
"vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
653 |
+
"vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
654 |
+
"vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
655 |
+
"vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
656 |
+
"vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
657 |
+
"vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
658 |
+
"vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
659 |
+
"vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
660 |
+
"vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
661 |
+
"vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
662 |
+
"vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
663 |
+
"vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
664 |
+
"vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
665 |
+
"vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
666 |
+
"vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
667 |
+
"vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
668 |
+
"vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
669 |
+
"vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
670 |
+
"vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
671 |
+
"vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
672 |
+
"vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
673 |
+
"vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
674 |
+
"vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
675 |
+
"vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
676 |
+
"vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
677 |
+
"vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
678 |
+
"vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
679 |
+
"vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
680 |
+
"vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
681 |
+
"vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
682 |
+
"vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
683 |
+
"vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
684 |
+
"vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
685 |
+
"vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
686 |
+
"vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
687 |
+
"vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
688 |
+
"vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
689 |
+
"vision_tower.vision_model.post_layernorm.bias": "model-00001-of-00004.safetensors",
|
690 |
+
"vision_tower.vision_model.post_layernorm.weight": "model-00001-of-00004.safetensors",
|
691 |
+
"vision_tower.vision_model.pre_layrnorm.bias": "model-00001-of-00004.safetensors",
|
692 |
+
"vision_tower.vision_model.pre_layrnorm.weight": "model-00001-of-00004.safetensors"
|
693 |
+
}
|
694 |
+
}
|
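As an illustration of how this index is consumed (everything below is a minimal sketch, not part of the upload; the file and tensor names are taken from this repository):

```python
# Minimal sketch: use model.safetensors.index.json to find which shard
# holds a tensor, then read only that tensor with safetensors' lazy API.
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "vision_tower.vision_model.post_layernorm.weight"
shard = index["weight_map"][name]  # -> "model-00001-of-00004.safetensors"

with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)  # loads just this tensor, not the whole shard
print(tensor.shape)
```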
preprocessor_config.json
ADDED
@@ -0,0 +1,52 @@
{
  "aspect_ratio_setting": "anyres",
  "crop_size": {
    "height": 336,
    "width": 336
  },
  "do_center_crop": true,
  "do_convert_rgb": true,
  "do_normalize": true,
  "do_pad": true,
  "do_rescale": true,
  "do_resize": true,
  "image_grid_pinpoints": [
    [336, 672],
    [672, 336],
    [672, 672],
    [1008, 336],
    [336, 1008]
  ],
  "image_mean": [0.48145466, 0.4578275, 0.40821073],
  "image_processor_type": "LlavaNextImageProcessor",
  "image_std": [0.26862954, 0.26130258, 0.27577711],
  "processor_class": "LlavaNextProcessor",
  "resample": 3,
  "rescale_factor": 0.00392156862745098,
  "size": {
    "shortest_edge": 336
  }
}
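The `image_grid_pinpoints` above are the candidate canvases that the anyres preprocessing chooses among. The following is an illustrative reimplementation of a best-fit selection (maximize preserved resolution, then minimize wasted canvas); it mirrors the idea behind `LlavaNextImageProcessor` but is not its exact code:

```python
# Illustrative sketch (not the library's exact routine): pick the pinpoint
# that keeps the most of the image and wastes the least canvas area.
def select_best_resolution(image_size, pinpoints):
    orig_h, orig_w = image_size
    best, best_eff, best_waste = None, 0, float("inf")
    for h, w in pinpoints:
        scale = min(w / orig_w, h / orig_h)  # fit the image inside the canvas
        eff = min(int(orig_w * scale) * int(orig_h * scale), orig_w * orig_h)
        waste = h * w - eff
        if eff > best_eff or (eff == best_eff and waste < best_waste):
            best, best_eff, best_waste = (h, w), eff, waste
    return best

pinpoints = [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]
print(select_best_resolution((500, 900), pinpoints))  # -> (672, 672)
```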
special_tokens_map.json
ADDED
@@ -0,0 +1,30 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
size 493443
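tokenizer.model is stored through Git LFS, so only the pointer appears in the diff. The pointer's `oid` and `size` can be checked against a downloaded copy; the local path below is an assumption:

```python
# Sketch: verify a downloaded LFS file against the pointer's oid/size.
import hashlib
import os

path = "tokenizer.model"  # assumed local path after download
expected_oid = "dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055"
expected_size = 493443

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert os.path.getsize(path) == expected_size
assert sha.hexdigest() == expected_oid
```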
tokenizer_config.json
ADDED
@@ -0,0 +1,70 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "add_prefix_space": null,
  "added_tokens_decoder": {
    "0": {"content": "<unk>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "1": {"content": "<s>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "2": {"content": "</s>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "32000": {"content": "<image>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "32001": {"content": "<pad>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}
  },
  "additional_special_tokens": [],
  "bos_token": "<s>",
  "chat_template": "{{ '<s>' }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in loop_messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '[INST] ' + content + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ content + '</s>' }}{% endif %}{% endfor %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "extra_special_tokens": {
    "image_token": "<image>"
  },
  "image_token": "<image>",
  "legacy": true,
  "max_length": null,
  "model_max_length": 1000000000000000019884624838656,
  "pad_to_multiple_of": null,
  "pad_token": "<pad>",
  "pad_token_type_id": 0,
  "padding_side": "right",
  "processor_class": "LlavaNextProcessor",
  "sp_model_kwargs": {},
  "spaces_between_special_tokens": false,
  "split_special_tokens": false,
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false
}
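The `chat_template` above is what `apply_chat_template` renders. A minimal sketch (the messages are invented for illustration; the repository id is the base model from this card, and this repository's own id would behave the same once available):

```python
# Sketch: render a prompt with the Mistral-style chat template above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
messages = [
    {"role": "user", "content": "<image>\nWhat is shown in this image?"},
    {"role": "assistant", "content": "A cat sitting on a laptop."},
]
print(tok.apply_chat_template(messages, tokenize=False))
# -> <s>[INST] <image>\nWhat is shown in this image? [/INST]A cat sitting on a laptop.</s>
```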
train_results.json
ADDED
@@ -0,0 +1,8 @@
{
  "epoch": 2.983177570093458,
  "total_flos": 4704901791744000.0,
  "train_loss": 0.3356622438084213,
  "train_runtime": 15341.8197,
  "train_samples_per_second": 6.686,
  "train_steps_per_second": 0.026
}
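These aggregates line up with the hyperparameters above; a quick sanity check (the dataset size is inferred, not stated anywhere in the upload):

```python
# Back-of-the-envelope check of train_results.json against the config.
total_steps = 399           # final step in trainer_log.jsonl
batch = 256                 # total_train_batch_size
epochs = 2.983177570093458  # "epoch"
runtime = 15341.8197        # "train_runtime" in seconds

samples_seen = total_steps * batch       # 102,144
dataset_size = samples_seen / epochs     # ~34,240 preference pairs (inferred)
throughput = samples_seen / runtime      # ~6.66, close to the reported 6.686
print(round(dataset_size), round(throughput, 3))
```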
trainer_log.jsonl
ADDED
@@ -0,0 +1,87 @@
{"current_steps": 5, "total_steps": 399, "loss": 0.6911, "accuracy": 0.29374998807907104, "learning_rate": 5e-07, "epoch": 0.037383177570093455, "percentage": 1.25, "elapsed_time": "0:02:55", "remaining_time": "3:49:57"}
{"current_steps": 10, "total_steps": 399, "loss": 0.6571, "accuracy": 0.606249988079071, "learning_rate": 1e-06, "epoch": 0.07476635514018691, "percentage": 2.51, "elapsed_time": "0:05:43", "remaining_time": "3:42:55"}
{"current_steps": 15, "total_steps": 399, "loss": 0.6246, "accuracy": 0.6875, "learning_rate": 9.995924118521016e-07, "epoch": 0.11214953271028037, "percentage": 3.76, "elapsed_time": "0:08:31", "remaining_time": "3:38:09"}
{"current_steps": 20, "total_steps": 399, "loss": 0.6793, "accuracy": 0.6937500238418579, "learning_rate": 9.983703119207998e-07, "epoch": 0.14953271028037382, "percentage": 5.01, "elapsed_time": "0:11:19", "remaining_time": "3:34:39"}
{"current_steps": 25, "total_steps": 399, "loss": 0.6303, "accuracy": 0.6625000238418579, "learning_rate": 9.963356926598848e-07, "epoch": 0.18691588785046728, "percentage": 6.27, "elapsed_time": "0:14:07", "remaining_time": "3:31:25"}
{"current_steps": 30, "total_steps": 399, "loss": 0.5921, "accuracy": 0.706250011920929, "learning_rate": 9.934918712161414e-07, "epoch": 0.22429906542056074, "percentage": 7.52, "elapsed_time": "0:16:55", "remaining_time": "3:28:09"}
{"current_steps": 35, "total_steps": 399, "loss": 0.5949, "accuracy": 0.7437499761581421, "learning_rate": 9.898434840212305e-07, "epoch": 0.2616822429906542, "percentage": 8.77, "elapsed_time": "0:19:43", "remaining_time": "3:25:05"}
{"current_steps": 40, "total_steps": 399, "loss": 0.6446, "accuracy": 0.6875, "learning_rate": 9.853964792326704e-07, "epoch": 0.29906542056074764, "percentage": 10.03, "elapsed_time": "0:22:31", "remaining_time": "3:22:10"}
{"current_steps": 45, "total_steps": 399, "loss": 0.5967, "accuracy": 0.762499988079071, "learning_rate": 9.80158107036243e-07, "epoch": 0.3364485981308411, "percentage": 11.28, "elapsed_time": "0:25:20", "remaining_time": "3:19:19"}
{"current_steps": 50, "total_steps": 399, "loss": 0.577, "accuracy": 0.6875, "learning_rate": 9.741369078256344e-07, "epoch": 0.37383177570093457, "percentage": 12.53, "elapsed_time": "0:28:09", "remaining_time": "3:16:36"}
{"current_steps": 50, "total_steps": 399, "eval_loss": 0.578223705291748, "epoch": 0.37383177570093457, "percentage": 12.53, "elapsed_time": "0:32:22", "remaining_time": "3:46:01"}
{"current_steps": 55, "total_steps": 399, "loss": 0.5854, "accuracy": 0.731249988079071, "learning_rate": 9.673426982785825e-07, "epoch": 0.411214953271028, "percentage": 13.78, "elapsed_time": "0:35:10", "remaining_time": "3:40:00"}
{"current_steps": 60, "total_steps": 399, "loss": 0.5468, "accuracy": 0.699999988079071, "learning_rate": 9.597865553522297e-07, "epoch": 0.4485981308411215, "percentage": 15.04, "elapsed_time": "0:37:59", "remaining_time": "3:34:37"}
{"current_steps": 65, "total_steps": 399, "loss": 0.5918, "accuracy": 0.75, "learning_rate": 9.514807982237785e-07, "epoch": 0.48598130841121495, "percentage": 16.29, "elapsed_time": "0:40:48", "remaining_time": "3:29:42"}
{"current_steps": 70, "total_steps": 399, "loss": 0.5295, "accuracy": 0.7250000238418579, "learning_rate": 9.424389682058886e-07, "epoch": 0.5233644859813084, "percentage": 17.54, "elapsed_time": "0:43:37", "remaining_time": "3:25:02"}
{"current_steps": 75, "total_steps": 399, "loss": 0.5487, "accuracy": 0.737500011920929, "learning_rate": 9.326758066695624e-07, "epoch": 0.5607476635514018, "percentage": 18.8, "elapsed_time": "0:46:24", "remaining_time": "3:20:28"}
{"current_steps": 80, "total_steps": 399, "loss": 0.6152, "accuracy": 0.71875, "learning_rate": 9.222072310105126e-07, "epoch": 0.5981308411214953, "percentage": 20.05, "elapsed_time": "0:49:13", "remaining_time": "3:16:15"}
{"current_steps": 85, "total_steps": 399, "loss": 0.553, "accuracy": 0.800000011920929, "learning_rate": 9.110503086981955e-07, "epoch": 0.6355140186915887, "percentage": 21.3, "elapsed_time": "0:52:00", "remaining_time": "3:12:06"}
{"current_steps": 90, "total_steps": 399, "loss": 0.5359, "accuracy": 0.7875000238418579, "learning_rate": 8.992232294498169e-07, "epoch": 0.6728971962616822, "percentage": 22.56, "elapsed_time": "0:54:48", "remaining_time": "3:08:10"}
{"current_steps": 95, "total_steps": 399, "loss": 0.4889, "accuracy": 0.75, "learning_rate": 8.867452755746805e-07, "epoch": 0.7102803738317757, "percentage": 23.81, "elapsed_time": "0:57:36", "remaining_time": "3:04:20"}
{"current_steps": 100, "total_steps": 399, "loss": 0.5388, "accuracy": 0.7749999761581421, "learning_rate": 8.736367905372246e-07, "epoch": 0.7476635514018691, "percentage": 25.06, "elapsed_time": "1:00:25", "remaining_time": "3:00:39"}
{"current_steps": 100, "total_steps": 399, "eval_loss": 0.5390673875808716, "epoch": 0.7476635514018691, "percentage": 25.06, "elapsed_time": "1:04:37", "remaining_time": "3:13:14"}
{"current_steps": 105, "total_steps": 399, "loss": 0.5277, "accuracy": 0.793749988079071, "learning_rate": 8.599191457900016e-07, "epoch": 0.7850467289719626, "percentage": 26.32, "elapsed_time": "1:07:56", "remaining_time": "3:10:14"}
{"current_steps": 110, "total_steps": 399, "loss": 0.5658, "accuracy": 0.7875000238418579, "learning_rate": 8.456147059306757e-07, "epoch": 0.822429906542056, "percentage": 27.57, "elapsed_time": "1:10:44", "remaining_time": "3:05:51"}
{"current_steps": 115, "total_steps": 399, "loss": 0.5152, "accuracy": 0.800000011920929, "learning_rate": 8.307467922398432e-07, "epoch": 0.8598130841121495, "percentage": 28.82, "elapsed_time": "1:13:33", "remaining_time": "3:01:40"}
{"current_steps": 120, "total_steps": 399, "loss": 0.5595, "accuracy": 0.78125, "learning_rate": 8.15339644659121e-07, "epoch": 0.897196261682243, "percentage": 30.08, "elapsed_time": "1:16:23", "remaining_time": "2:57:36"}
{"current_steps": 125, "total_steps": 399, "loss": 0.4636, "accuracy": 0.7875000238418579, "learning_rate": 7.994183822714968e-07, "epoch": 0.9345794392523364, "percentage": 31.33, "elapsed_time": "1:19:12", "remaining_time": "2:53:37"}
{"current_steps": 130, "total_steps": 399, "loss": 0.4432, "accuracy": 0.7875000238418579, "learning_rate": 7.830089623483656e-07, "epoch": 0.9719626168224299, "percentage": 32.58, "elapsed_time": "1:22:00", "remaining_time": "2:49:41"}
{"current_steps": 135, "total_steps": 399, "loss": 0.4499, "accuracy": 0.8500000238418579, "learning_rate": 7.661381380300253e-07, "epoch": 1.0093457943925233, "percentage": 33.83, "elapsed_time": "1:24:50", "remaining_time": "2:45:54"}
{"current_steps": 140, "total_steps": 399, "loss": 0.2627, "accuracy": 0.893750011920929, "learning_rate": 7.488334147086263e-07, "epoch": 1.0467289719626167, "percentage": 35.09, "elapsed_time": "1:27:39", "remaining_time": "2:42:09"}
{"current_steps": 145, "total_steps": 399, "loss": 0.2753, "accuracy": 0.925000011920929, "learning_rate": 7.311230051846819e-07, "epoch": 1.0841121495327102, "percentage": 36.34, "elapsed_time": "1:30:29", "remaining_time": "2:38:31"}
{"current_steps": 150, "total_steps": 399, "loss": 0.2653, "accuracy": 0.90625, "learning_rate": 7.130357836702577e-07, "epoch": 1.1214953271028036, "percentage": 37.59, "elapsed_time": "1:33:17", "remaining_time": "2:34:52"}
{"current_steps": 150, "total_steps": 399, "eval_loss": 0.5246723890304565, "epoch": 1.1214953271028036, "percentage": 37.59, "elapsed_time": "1:37:30", "remaining_time": "2:41:52"}
{"current_steps": 155, "total_steps": 399, "loss": 0.2499, "accuracy": 0.8500000238418579, "learning_rate": 6.946012387138247e-07, "epoch": 1.158878504672897, "percentage": 38.85, "elapsed_time": "1:40:18", "remaining_time": "2:37:55"}
{"current_steps": 160, "total_steps": 399, "loss": 0.2442, "accuracy": 0.8999999761581421, "learning_rate": 6.758494251235274e-07, "epoch": 1.1962616822429906, "percentage": 40.1, "elapsed_time": "1:43:07", "remaining_time": "2:34:02"}
{"current_steps": 165, "total_steps": 399, "loss": 0.2703, "accuracy": 0.9125000238418579, "learning_rate": 6.568109149672496e-07, "epoch": 1.233644859813084, "percentage": 41.35, "elapsed_time": "1:45:56", "remaining_time": "2:30:14"}
{"current_steps": 170, "total_steps": 399, "loss": 0.2725, "accuracy": 0.8999999761581421, "learning_rate": 6.375167477293648e-07, "epoch": 1.2710280373831775, "percentage": 42.61, "elapsed_time": "1:48:43", "remaining_time": "2:26:27"}
{"current_steps": 175, "total_steps": 399, "loss": 0.2646, "accuracy": 0.925000011920929, "learning_rate": 6.179983797054321e-07, "epoch": 1.308411214953271, "percentage": 43.86, "elapsed_time": "1:51:31", "remaining_time": "2:22:45"}
{"current_steps": 180, "total_steps": 399, "loss": 0.3069, "accuracy": 0.862500011920929, "learning_rate": 5.982876327173427e-07, "epoch": 1.3457943925233644, "percentage": 45.11, "elapsed_time": "1:54:20", "remaining_time": "2:19:07"}
{"current_steps": 185, "total_steps": 399, "loss": 0.2444, "accuracy": 0.875, "learning_rate": 5.78416642232531e-07, "epoch": 1.3831775700934579, "percentage": 46.37, "elapsed_time": "1:57:09", "remaining_time": "2:15:31"}
{"current_steps": 190, "total_steps": 399, "loss": 0.2604, "accuracy": 0.875, "learning_rate": 5.584178049718314e-07, "epoch": 1.4205607476635513, "percentage": 47.62, "elapsed_time": "1:59:57", "remaining_time": "2:11:57"}
{"current_steps": 195, "total_steps": 399, "loss": 0.3042, "accuracy": 0.8374999761581421, "learning_rate": 5.38323726091401e-07, "epoch": 1.4579439252336448, "percentage": 48.87, "elapsed_time": "2:02:46", "remaining_time": "2:08:26"}
{"current_steps": 200, "total_steps": 399, "loss": 0.2571, "accuracy": 0.893750011920929, "learning_rate": 5.181671660248178e-07, "epoch": 1.4953271028037383, "percentage": 50.13, "elapsed_time": "2:05:34", "remaining_time": "2:04:57"}
{"current_steps": 200, "total_steps": 399, "eval_loss": 0.5108085870742798, "epoch": 1.4953271028037383, "percentage": 50.13, "elapsed_time": "2:09:47", "remaining_time": "2:09:08"}
{"current_steps": 205, "total_steps": 399, "loss": 0.2725, "accuracy": 0.8812500238418579, "learning_rate": 4.979809870720242e-07, "epoch": 1.5327102803738317, "percentage": 51.38, "elapsed_time": "2:13:08", "remaining_time": "2:06:00"}
{"current_steps": 210, "total_steps": 399, "loss": 0.283, "accuracy": 0.887499988079071, "learning_rate": 4.777980998221901e-07, "epoch": 1.5700934579439252, "percentage": 52.63, "elapsed_time": "2:15:57", "remaining_time": "2:02:21"}
{"current_steps": 215, "total_steps": 399, "loss": 0.3058, "accuracy": 0.918749988079071, "learning_rate": 4.5765140949784923e-07, "epoch": 1.6074766355140186, "percentage": 53.88, "elapsed_time": "2:18:46", "remaining_time": "1:58:46"}
{"current_steps": 220, "total_steps": 399, "loss": 0.2327, "accuracy": 0.8999999761581421, "learning_rate": 4.3757376230778383e-07, "epoch": 1.644859813084112, "percentage": 55.14, "elapsed_time": "2:21:36", "remaining_time": "1:55:12"}
{"current_steps": 225, "total_steps": 399, "loss": 0.2888, "accuracy": 0.9125000238418579, "learning_rate": 4.1759789189612333e-07, "epoch": 1.6822429906542056, "percentage": 56.39, "elapsed_time": "2:24:24", "remaining_time": "1:51:40"}
{"current_steps": 230, "total_steps": 399, "loss": 0.2588, "accuracy": 0.862500011920929, "learning_rate": 3.9775636597496285e-07, "epoch": 1.719626168224299, "percentage": 57.64, "elapsed_time": "2:27:11", "remaining_time": "1:48:09"}
{"current_steps": 235, "total_steps": 399, "loss": 0.2849, "accuracy": 0.9125000238418579, "learning_rate": 3.7808153322750893e-07, "epoch": 1.7570093457943925, "percentage": 58.9, "elapsed_time": "2:29:59", "remaining_time": "1:44:40"}
{"current_steps": 240, "total_steps": 399, "loss": 0.2909, "accuracy": 0.893750011920929, "learning_rate": 3.586054705683208e-07, "epoch": 1.794392523364486, "percentage": 60.15, "elapsed_time": "2:32:47", "remaining_time": "1:41:13"}
{"current_steps": 245, "total_steps": 399, "loss": 0.282, "accuracy": 0.887499988079071, "learning_rate": 3.393599308466285e-07, "epoch": 1.8317757009345794, "percentage": 61.4, "elapsed_time": "2:35:35", "remaining_time": "1:37:48"}
{"current_steps": 250, "total_steps": 399, "loss": 0.2803, "accuracy": 0.887499988079071, "learning_rate": 3.203762910779944e-07, "epoch": 1.8691588785046729, "percentage": 62.66, "elapsed_time": "2:38:23", "remaining_time": "1:34:24"}
{"current_steps": 250, "total_steps": 399, "eval_loss": 0.48170700669288635, "epoch": 1.8691588785046729, "percentage": 62.66, "elapsed_time": "2:42:36", "remaining_time": "1:36:54"}
{"current_steps": 255, "total_steps": 399, "loss": 0.2684, "accuracy": 0.893750011920929, "learning_rate": 3.0168550128871264e-07, "epoch": 1.9065420560747663, "percentage": 63.91, "elapsed_time": "2:45:24", "remaining_time": "1:33:24"}
{"current_steps": 260, "total_steps": 399, "loss": 0.2788, "accuracy": 0.824999988079071, "learning_rate": 2.833180340563554e-07, "epoch": 1.9439252336448598, "percentage": 65.16, "elapsed_time": "2:48:11", "remaining_time": "1:29:55"}
{"current_steps": 265, "total_steps": 399, "loss": 0.2624, "accuracy": 0.893750011920929, "learning_rate": 2.653038348287261e-07, "epoch": 1.9813084112149533, "percentage": 66.42, "elapsed_time": "2:50:59", "remaining_time": "1:26:27"}
{"current_steps": 270, "total_steps": 399, "loss": 0.2462, "accuracy": 0.925000011920929, "learning_rate": 2.476722731022207e-07, "epoch": 2.0186915887850465, "percentage": 67.67, "elapsed_time": "2:53:48", "remaining_time": "1:23:02"}
{"current_steps": 275, "total_steps": 399, "loss": 0.1566, "accuracy": 0.9437500238418579, "learning_rate": 2.3045209453919407e-07, "epoch": 2.05607476635514, "percentage": 68.92, "elapsed_time": "2:56:35", "remaining_time": "1:19:37"}
{"current_steps": 280, "total_steps": 399, "loss": 0.1735, "accuracy": 0.956250011920929, "learning_rate": 2.13671374102394e-07, "epoch": 2.0934579439252334, "percentage": 70.18, "elapsed_time": "2:59:23", "remaining_time": "1:16:14"}
{"current_steps": 285, "total_steps": 399, "loss": 0.1593, "accuracy": 0.949999988079071, "learning_rate": 1.9735747028287342e-07, "epoch": 2.130841121495327, "percentage": 71.43, "elapsed_time": "3:02:13", "remaining_time": "1:12:53"}
{"current_steps": 290, "total_steps": 399, "loss": 0.1619, "accuracy": 0.90625, "learning_rate": 1.815369804960034e-07, "epoch": 2.1682242990654204, "percentage": 72.68, "elapsed_time": "3:05:01", "remaining_time": "1:09:32"}
{"current_steps": 295, "total_steps": 399, "loss": 0.1841, "accuracy": 0.925000011920929, "learning_rate": 1.6623569771830852e-07, "epoch": 2.205607476635514, "percentage": 73.93, "elapsed_time": "3:07:49", "remaining_time": "1:06:12"}
{"current_steps": 300, "total_steps": 399, "loss": 0.1739, "accuracy": 0.9624999761581421, "learning_rate": 1.5147856843582002e-07, "epoch": 2.2429906542056073, "percentage": 75.19, "elapsed_time": "3:10:37", "remaining_time": "1:02:54"}
{"current_steps": 300, "total_steps": 399, "eval_loss": 0.49124717712402344, "epoch": 2.2429906542056073, "percentage": 75.19, "elapsed_time": "3:14:50", "remaining_time": "1:04:17"}
{"current_steps": 305, "total_steps": 399, "loss": 0.1359, "accuracy": 0.949999988079071, "learning_rate": 1.3728965197250781e-07, "epoch": 2.2803738317757007, "percentage": 76.44, "elapsed_time": "3:18:09", "remaining_time": "1:01:04"}
{"current_steps": 310, "total_steps": 399, "loss": 0.1667, "accuracy": 0.918749988079071, "learning_rate": 1.236920812651003e-07, "epoch": 2.317757009345794, "percentage": 77.69, "elapsed_time": "3:20:57", "remaining_time": "0:57:41"}
{"current_steps": 315, "total_steps": 399, "loss": 0.1652, "accuracy": 0.925000011920929, "learning_rate": 1.1070802514823913e-07, "epoch": 2.3551401869158877, "percentage": 78.95, "elapsed_time": "3:23:45", "remaining_time": "0:54:20"}
{"current_steps": 320, "total_steps": 399, "loss": 0.1591, "accuracy": 0.96875, "learning_rate": 9.835865221146389e-08, "epoch": 2.392523364485981, "percentage": 80.2, "elapsed_time": "3:26:33", "remaining_time": "0:50:59"}
{"current_steps": 325, "total_steps": 399, "loss": 0.1595, "accuracy": 0.949999988079071, "learning_rate": 8.666409628694693e-08, "epoch": 2.4299065420560746, "percentage": 81.45, "elapsed_time": "3:29:22", "remaining_time": "0:47:40"}
{"current_steps": 330, "total_steps": 399, "loss": 0.1524, "accuracy": 0.9437500238418579, "learning_rate": 7.564342362424713e-08, "epoch": 2.467289719626168, "percentage": 82.71, "elapsed_time": "3:32:13", "remaining_time": "0:44:22"}
{"current_steps": 335, "total_steps": 399, "loss": 0.1497, "accuracy": 0.9375, "learning_rate": 6.53146018056011e-08, "epoch": 2.5046728971962615, "percentage": 83.96, "elapsed_time": "3:35:01", "remaining_time": "0:41:04"}
{"current_steps": 340, "total_steps": 399, "loss": 0.1615, "accuracy": 0.90625, "learning_rate": 5.569447045242931e-08, "epoch": 2.542056074766355, "percentage": 85.21, "elapsed_time": "3:37:49", "remaining_time": "0:37:48"}
{"current_steps": 345, "total_steps": 399, "loss": 0.1534, "accuracy": 0.9437500238418579, "learning_rate": 4.6798713770814625e-08, "epoch": 2.5794392523364484, "percentage": 86.47, "elapsed_time": "3:40:37", "remaining_time": "0:34:32"}
{"current_steps": 350, "total_steps": 399, "loss": 0.1631, "accuracy": 0.949999988079071, "learning_rate": 3.864183498071699e-08, "epoch": 2.616822429906542, "percentage": 87.72, "elapsed_time": "3:43:25", "remaining_time": "0:31:16"}
{"current_steps": 350, "total_steps": 399, "eval_loss": 0.49646082520484924, "epoch": 2.616822429906542, "percentage": 87.72, "elapsed_time": "3:47:37", "remaining_time": "0:31:52"}
{"current_steps": 355, "total_steps": 399, "loss": 0.1623, "accuracy": 0.9312499761581421, "learning_rate": 3.1237132670611455e-08, "epoch": 2.6542056074766354, "percentage": 88.97, "elapsed_time": "3:50:25", "remaining_time": "0:28:33"}
{"current_steps": 360, "total_steps": 399, "loss": 0.1735, "accuracy": 0.9312499761581421, "learning_rate": 2.4596679116099083e-08, "epoch": 2.691588785046729, "percentage": 90.23, "elapsed_time": "3:53:13", "remaining_time": "0:25:15"}
{"current_steps": 365, "total_steps": 399, "loss": 0.161, "accuracy": 0.9375, "learning_rate": 1.8731300597841837e-08, "epoch": 2.7289719626168223, "percentage": 91.48, "elapsed_time": "3:56:01", "remaining_time": "0:21:59"}
{"current_steps": 370, "total_steps": 399, "loss": 0.1686, "accuracy": 0.9375, "learning_rate": 1.365055975090773e-08, "epoch": 2.7663551401869158, "percentage": 92.73, "elapsed_time": "3:58:48", "remaining_time": "0:18:43"}
{"current_steps": 375, "total_steps": 399, "loss": 0.1565, "accuracy": 0.9937499761581421, "learning_rate": 9.362739974303757e-09, "epoch": 2.803738317757009, "percentage": 93.98, "elapsed_time": "4:01:37", "remaining_time": "0:15:27"}
{"current_steps": 380, "total_steps": 399, "loss": 0.1504, "accuracy": 0.9375, "learning_rate": 5.874831926114931e-09, "epoch": 2.8411214953271027, "percentage": 95.24, "elapsed_time": "4:04:24", "remaining_time": "0:12:13"}
{"current_steps": 385, "total_steps": 399, "loss": 0.1641, "accuracy": 0.956250011920929, "learning_rate": 3.192522126266861e-09, "epoch": 2.878504672897196, "percentage": 96.49, "elapsed_time": "4:07:13", "remaining_time": "0:08:59"}
{"current_steps": 390, "total_steps": 399, "loss": 0.1715, "accuracy": 0.96875, "learning_rate": 1.3201836854931924e-09, "epoch": 2.9158878504672896, "percentage": 97.74, "elapsed_time": "4:10:03", "remaining_time": "0:05:46"}
{"current_steps": 395, "total_steps": 399, "loss": 0.1565, "accuracy": 0.925000011920929, "learning_rate": 2.6086917562317957e-10, "epoch": 2.953271028037383, "percentage": 99.0, "elapsed_time": "4:12:52", "remaining_time": "0:02:33"}
{"current_steps": 399, "total_steps": 399, "epoch": 2.983177570093458, "percentage": 100.0, "elapsed_time": "4:15:39", "remaining_time": "0:00:00"}
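The log interleaves training records with evaluation records (the latter carry `eval_loss` and no `accuracy`). A small sketch for splitting and plotting them, along the lines of the `training_loss.png` / `training_eval_loss.png` files in this upload:

```python
# Sketch: split trainer_log.jsonl into train/eval records and plot both curves.
import json
import matplotlib.pyplot as plt

train, evals = [], []
with open("trainer_log.jsonl") as f:
    for line in f:
        rec = json.loads(line)
        (evals if "eval_loss" in rec else train).append(rec)

plt.plot([r["current_steps"] for r in train if "loss" in r],
         [r["loss"] for r in train if "loss" in r], label="train loss")
plt.plot([r["current_steps"] for r in evals],
         [r["eval_loss"] for r in evals], marker="o", label="eval loss")
plt.xlabel("step"); plt.ylabel("loss"); plt.legend()
plt.savefig("loss_curves.png")
```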
trainer_state.json
ADDED
@@ -0,0 +1,1339 @@
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.983177570093458,
  "eval_steps": 50,
  "global_step": 399,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.037383177570093455,
      "grad_norm": 53.10315006693553,
      "learning_rate": 5e-07,
      "logits/chosen": -2.7264351844787598,
      "logits/rejected": -2.7314915657043457,
      "logps/chosen": -233.46450805664062,
      "logps/rejected": -215.2651824951172,
      "loss": 0.6911,
      "rewards/accuracies": 0.29374998807907104,
      "rewards/chosen": 0.011523213237524033,
      "rewards/margins": 0.00106804131064564,
      "rewards/rejected": 0.010455173440277576,
      "step": 5
    },
    {
      "epoch": 0.07476635514018691,
      "grad_norm": 47.36575434115236,
      "learning_rate": 1e-06,
      "logits/chosen": -2.7007861137390137,
      "logits/rejected": -2.6771092414855957,
      "logps/chosen": -243.54736328125,
      "logps/rejected": -216.7264404296875,
      "loss": 0.6571,
      "rewards/accuracies": 0.606249988079071,
      "rewards/chosen": 0.35093382000923157,
      "rewards/margins": 0.08091190457344055,
      "rewards/rejected": 0.270021915435791,
      "step": 10
    },
    {
      "epoch": 0.11214953271028037,
      "grad_norm": 47.16978215311432,
      "learning_rate": 9.995924118521016e-07,
      "logits/chosen": -2.430677652359009,
      "logits/rejected": -2.3965601921081543,
      "logps/chosen": -245.1031951904297,
      "logps/rejected": -206.2293701171875,
      "loss": 0.6246,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 1.259174108505249,
      "rewards/margins": 0.5380627512931824,
      "rewards/rejected": 0.7211112380027771,
      "step": 15
    },
    {
      "epoch": 0.14953271028037382,
      "grad_norm": 43.635723418114395,
      "learning_rate": 9.983703119207998e-07,
      "logits/chosen": -2.1696972846984863,
      "logits/rejected": -2.1355605125427246,
      "logps/chosen": -241.0774383544922,
      "logps/rejected": -203.36190795898438,
      "loss": 0.6793,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": 1.169782280921936,
      "rewards/margins": 0.6064848899841309,
      "rewards/rejected": 0.5632972717285156,
      "step": 20
    },
    {
      "epoch": 0.18691588785046728,
      "grad_norm": 48.55163726023584,
      "learning_rate": 9.963356926598848e-07,
      "logits/chosen": -2.0636093616485596,
      "logits/rejected": -2.068882942199707,
      "logps/chosen": -245.859130859375,
      "logps/rejected": -225.98214721679688,
      "loss": 0.6303,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 1.4211041927337646,
      "rewards/margins": 0.8779617547988892,
      "rewards/rejected": 0.5431426167488098,
      "step": 25
    },
    {
      "epoch": 0.22429906542056074,
      "grad_norm": 42.28573315120618,
      "learning_rate": 9.934918712161414e-07,
      "logits/chosen": -2.1142563819885254,
      "logits/rejected": -2.0855462551116943,
      "logps/chosen": -239.36471557617188,
      "logps/rejected": -208.6727294921875,
      "loss": 0.5921,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": 0.9205316305160522,
      "rewards/margins": 0.793667733669281,
      "rewards/rejected": 0.12686386704444885,
      "step": 30
    },
    {
      "epoch": 0.2616822429906542,
      "grad_norm": 43.204924277825036,
      "learning_rate": 9.898434840212305e-07,
      "logits/chosen": -2.1356260776519775,
      "logits/rejected": -2.0982658863067627,
      "logps/chosen": -249.84848022460938,
      "logps/rejected": -232.4295196533203,
      "loss": 0.5949,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": 0.6800674200057983,
      "rewards/margins": 0.8049441576004028,
      "rewards/rejected": -0.12487666308879852,
      "step": 35
    },
    {
      "epoch": 0.29906542056074764,
      "grad_norm": 38.34647398013497,
      "learning_rate": 9.853964792326704e-07,
      "logits/chosen": -2.1388490200042725,
      "logits/rejected": -2.106875419616699,
      "logps/chosen": -231.1354217529297,
      "logps/rejected": -210.77197265625,
      "loss": 0.6446,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.6789754033088684,
      "rewards/margins": 1.0355161428451538,
      "rewards/rejected": -0.35654082894325256,
      "step": 40
    },
    {
      "epoch": 0.3364485981308411,
      "grad_norm": 44.46300001215897,
      "learning_rate": 9.80158107036243e-07,
      "logits/chosen": -2.182988405227661,
      "logits/rejected": -2.139224052429199,
      "logps/chosen": -253.672119140625,
      "logps/rejected": -198.7143096923828,
      "loss": 0.5967,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.711867094039917,
      "rewards/margins": 0.8050382733345032,
      "rewards/rejected": -0.093171127140522,
      "step": 45
    },
    {
      "epoch": 0.37383177570093457,
      "grad_norm": 36.368588934194,
      "learning_rate": 9.741369078256344e-07,
      "logits/chosen": -2.1803622245788574,
      "logits/rejected": -2.1714465618133545,
      "logps/chosen": -229.3830108642578,
      "logps/rejected": -214.208251953125,
      "loss": 0.577,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.7632301449775696,
      "rewards/margins": 0.9728155136108398,
      "rewards/rejected": -0.20958539843559265,
      "step": 50
    },
    {
      "epoch": 0.37383177570093457,
      "eval_logits/chosen": -2.168900966644287,
      "eval_logits/rejected": -2.154597759246826,
      "eval_logps/chosen": -240.57708740234375,
      "eval_logps/rejected": -220.16871643066406,
      "eval_loss": 0.578223705291748,
      "eval_rewards/accuracies": 0.7250000238418579,
      "eval_rewards/chosen": 0.7959616780281067,
      "eval_rewards/margins": 0.9759488701820374,
      "eval_rewards/rejected": -0.17998719215393066,
      "eval_runtime": 252.8699,
      "eval_samples_per_second": 15.024,
      "eval_steps_per_second": 0.237,
      "step": 50
    },
    {
      "epoch": 0.411214953271028,
      "grad_norm": 36.85921784944549,
      "learning_rate": 9.673426982785825e-07,
      "logits/chosen": -2.1165783405303955,
      "logits/rejected": -2.133802890777588,
      "logps/chosen": -227.85147094726562,
      "logps/rejected": -229.4573516845703,
      "loss": 0.5854,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": 0.668209969997406,
      "rewards/margins": 0.9655311703681946,
      "rewards/rejected": -0.29732123017311096,
      "step": 55
    },
    {
      "epoch": 0.4485981308411215,
      "grad_norm": 37.099348027626476,
      "learning_rate": 9.597865553522297e-07,
      "logits/chosen": -2.1299071311950684,
      "logits/rejected": -2.1265180110931396,
      "logps/chosen": -246.6769256591797,
      "logps/rejected": -218.6841583251953,
      "loss": 0.5468,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.3689742088317871,
      "rewards/margins": 1.112032175064087,
      "rewards/rejected": -0.7430580258369446,
      "step": 60
    },
    {
      "epoch": 0.48598130841121495,
      "grad_norm": 35.927665101781095,
      "learning_rate": 9.514807982237785e-07,
      "logits/chosen": -2.298119068145752,
      "logits/rejected": -2.2940573692321777,
      "logps/chosen": -265.78155517578125,
      "logps/rejected": -213.521240234375,
      "loss": 0.5918,
      "rewards/accuracies": 0.75,
      "rewards/chosen": 0.9371916055679321,
      "rewards/margins": 1.3884761333465576,
      "rewards/rejected": -0.45128464698791504,
      "step": 65
    },
    {
      "epoch": 0.5233644859813084,
      "grad_norm": 39.11873955408514,
      "learning_rate": 9.424389682058886e-07,
      "logits/chosen": -2.3393406867980957,
      "logits/rejected": -2.309872627258301,
      "logps/chosen": -218.0382080078125,
      "logps/rejected": -194.01332092285156,
      "loss": 0.5295,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.9512729644775391,
      "rewards/margins": 1.144668698310852,
      "rewards/rejected": -0.19339559972286224,
      "step": 70
    },
    {
      "epoch": 0.5607476635514018,
      "grad_norm": 37.634486315960864,
      "learning_rate": 9.326758066695624e-07,
      "logits/chosen": -2.34443736076355,
      "logits/rejected": -2.325118064880371,
      "logps/chosen": -259.53143310546875,
      "logps/rejected": -198.3692626953125,
      "loss": 0.5487,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.9949586987495422,
      "rewards/margins": 1.7472797632217407,
      "rewards/rejected": -0.7523208856582642,
      "step": 75
    },
    {
      "epoch": 0.5981308411214953,
      "grad_norm": 35.50580413675745,
      "learning_rate": 9.222072310105126e-07,
      "logits/chosen": -2.3364174365997314,
      "logits/rejected": -2.312894105911255,
      "logps/chosen": -236.32666015625,
      "logps/rejected": -261.32843017578125,
      "loss": 0.6152,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": 0.37070125341415405,
      "rewards/margins": 1.0360088348388672,
      "rewards/rejected": -0.6653076410293579,
      "step": 80
    },
    {
      "epoch": 0.6355140186915887,
      "grad_norm": 33.7374435363093,
      "learning_rate": 9.110503086981955e-07,
      "logits/chosen": -2.2803781032562256,
      "logits/rejected": -2.267977237701416,
      "logps/chosen": -255.5703887939453,
      "logps/rejected": -206.46066284179688,
      "loss": 0.553,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.18914642930030823,
      "rewards/margins": 1.2585302591323853,
      "rewards/rejected": -1.0693838596343994,
      "step": 85
    },
    {
      "epoch": 0.6728971962616822,
      "grad_norm": 39.897529811340966,
      "learning_rate": 8.992232294498169e-07,
      "logits/chosen": -2.1736109256744385,
      "logits/rejected": -2.1623623371124268,
      "logps/chosen": -255.2686309814453,
      "logps/rejected": -225.6684112548828,
      "loss": 0.5359,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.11371274292469025,
      "rewards/margins": 1.4066712856292725,
      "rewards/rejected": -1.2929584980010986,
      "step": 90
    },
    {
      "epoch": 0.7102803738317757,
      "grad_norm": 36.88770484599986,
      "learning_rate": 8.867452755746805e-07,
      "logits/chosen": -2.1795907020568848,
      "logits/rejected": -2.1622931957244873,
      "logps/chosen": -267.36358642578125,
      "logps/rejected": -237.27359008789062,
      "loss": 0.4889,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.24791303277015686,
      "rewards/margins": 1.365724802017212,
      "rewards/rejected": -1.6136376857757568,
      "step": 95
    },
    {
      "epoch": 0.7476635514018691,
      "grad_norm": 29.575103655541206,
      "learning_rate": 8.736367905372246e-07,
      "logits/chosen": -2.1824848651885986,
      "logits/rejected": -2.164578914642334,
      "logps/chosen": -262.33575439453125,
      "logps/rejected": -242.2086639404297,
      "loss": 0.5388,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.11618832498788834,
      "rewards/margins": 1.8072330951690674,
      "rewards/rejected": -1.9234212636947632,
      "step": 100
    },
    {
      "epoch": 0.7476635514018691,
      "eval_logits/chosen": -2.199050188064575,
      "eval_logits/rejected": -2.174029588699341,
      "eval_logps/chosen": -252.93431091308594,
      "eval_logps/rejected": -238.5013885498047,
      "eval_loss": 0.5390673875808716,
      "eval_rewards/accuracies": 0.7479166388511658,
      "eval_rewards/chosen": -0.4397614300251007,
      "eval_rewards/margins": 1.5734889507293701,
      "eval_rewards/rejected": -2.0132501125335693,
      "eval_runtime": 252.4297,
      "eval_samples_per_second": 15.05,
      "eval_steps_per_second": 0.238,
      "step": 100
    },
    {
      "epoch": 0.7850467289719626,
      "grad_norm": 33.93716964639979,
      "learning_rate": 8.599191457900016e-07,
      "logits/chosen": -2.2423758506774902,
      "logits/rejected": -2.215951919555664,
      "logps/chosen": -248.74136352539062,
      "logps/rejected": -238.6445770263672,
      "loss": 0.5277,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -0.2006298005580902,
      "rewards/margins": 1.6982009410858154,
      "rewards/rejected": -1.898830771446228,
      "step": 105
    },
    {
      "epoch": 0.822429906542056,
      "grad_norm": 34.93462881131966,
      "learning_rate": 8.456147059306757e-07,
      "logits/chosen": -2.3176093101501465,
      "logits/rejected": -2.312403440475464,
      "logps/chosen": -260.4902648925781,
      "logps/rejected": -202.03927612304688,
      "loss": 0.5658,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.13997402787208557,
      "rewards/margins": 1.6242597103118896,
      "rewards/rejected": -1.7642338275909424,
      "step": 110
    },
    {
      "epoch": 0.8598130841121495,
      "grad_norm": 33.1410955281443,
      "learning_rate": 8.307467922398432e-07,
      "logits/chosen": -2.4065065383911133,
      "logits/rejected": -2.4101977348327637,
      "logps/chosen": -249.76541137695312,
      "logps/rejected": -247.932861328125,
      "loss": 0.5152,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.18386869132518768,
      "rewards/margins": 1.8899492025375366,
      "rewards/rejected": -2.073817729949951,
      "step": 115
    },
    {
      "epoch": 0.897196261682243,
      "grad_norm": 31.607304609592937,
      "learning_rate": 8.15339644659121e-07,
      "logits/chosen": -2.4259490966796875,
      "logits/rejected": -2.41045880317688,
      "logps/chosen": -258.97723388671875,
      "logps/rejected": -221.7887420654297,
      "loss": 0.5595,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -0.14788323640823364,
      "rewards/margins": 1.9169807434082031,
      "rewards/rejected": -2.064863681793213,
      "step": 120
    },
    {
      "epoch": 0.9345794392523364,
      "grad_norm": 32.67833778299361,
      "learning_rate": 7.994183822714968e-07,
      "logits/chosen": -2.4193129539489746,
      "logits/rejected": -2.4144270420074463,
      "logps/chosen": -262.0440368652344,
      "logps/rejected": -223.1062469482422,
      "loss": 0.4636,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.24569320678710938,
      "rewards/margins": 2.0063912868499756,
      "rewards/rejected": -2.252084970474243,
      "step": 125
    },
    {
      "epoch": 0.9719626168224299,
      "grad_norm": 28.586935737564,
      "learning_rate": 7.830089623483656e-07,
      "logits/chosen": -2.431533098220825,
      "logits/rejected": -2.414226531982422,
      "logps/chosen": -250.75613403320312,
      "logps/rejected": -233.8451385498047,
      "loss": 0.4432,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.2264728844165802,
      "rewards/margins": 2.210639476776123,
      "rewards/rejected": -2.4371120929718018,
      "step": 130
    },
    {
      "epoch": 1.0093457943925233,
      "grad_norm": 21.434099176546553,
      "learning_rate": 7.661381380300253e-07,
      "logits/chosen": -2.442919969558716,
      "logits/rejected": -2.422956705093384,
      "logps/chosen": -274.38555908203125,
      "logps/rejected": -250.0774688720703,
      "loss": 0.4499,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": 0.05850023776292801,
      "rewards/margins": 2.6394431591033936,
      "rewards/rejected": -2.5809426307678223,
      "step": 135
    },
    {
      "epoch": 1.0467289719626167,
      "grad_norm": 18.591470106307337,
      "learning_rate": 7.488334147086263e-07,
      "logits/chosen": -2.431138515472412,
      "logits/rejected": -2.4030303955078125,
      "logps/chosen": -250.7612762451172,
      "logps/rejected": -224.02401733398438,
      "loss": 0.2627,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": 0.4705049395561218,
      "rewards/margins": 2.965265989303589,
      "rewards/rejected": -2.494760751724243,
      "step": 140
    },
    {
      "epoch": 1.0841121495327102,
      "grad_norm": 26.220775795974554,
      "learning_rate": 7.311230051846819e-07,
      "logits/chosen": -2.4043233394622803,
      "logits/rejected": -2.3778297901153564,
      "logps/chosen": -225.55380249023438,
      "logps/rejected": -237.52963256835938,
      "loss": 0.2753,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 0.8029943704605103,
      "rewards/margins": 3.111466646194458,
      "rewards/rejected": -2.3084726333618164,
      "step": 145
    },
    {
      "epoch": 1.1214953271028036,
      "grad_norm": 20.647616721664097,
      "learning_rate": 7.130357836702577e-07,
      "logits/chosen": -2.3966336250305176,
      "logits/rejected": -2.3554282188415527,
      "logps/chosen": -260.1661376953125,
      "logps/rejected": -240.3435821533203,
      "loss": 0.2653,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": 1.162467360496521,
      "rewards/margins": 3.1894516944885254,
      "rewards/rejected": -2.026984214782715,
      "step": 150
    },
    {
      "epoch": 1.1214953271028036,
      "eval_logits/chosen": -2.3485267162323,
      "eval_logits/rejected": -2.32659649848938,
      "eval_logps/chosen": -245.67454528808594,
      "eval_logps/rejected": -235.21473693847656,
      "eval_loss": 0.5246723890304565,
      "eval_rewards/accuracies": 0.7645833492279053,
      "eval_rewards/chosen": 0.2862185537815094,
      "eval_rewards/margins": 1.9708030223846436,
      "eval_rewards/rejected": -1.6845842599868774,
      "eval_runtime": 253.0283,
      "eval_samples_per_second": 15.014,
      "eval_steps_per_second": 0.237,
      "step": 150
    },
    {
      "epoch": 1.158878504672897,
      "grad_norm": 23.50919746826828,
      "learning_rate": 6.946012387138247e-07,
      "logits/chosen": -2.3319590091705322,
      "logits/rejected": -2.305656909942627,
      "logps/chosen": -240.41323852539062,
      "logps/rejected": -236.7122802734375,
      "loss": 0.2499,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": 0.9150522947311401,
      "rewards/margins": 3.2040953636169434,
      "rewards/rejected": -2.2890431880950928,
      "step": 155
    },
    {
      "epoch": 1.1962616822429906,
      "grad_norm": 16.10335481559186,
      "learning_rate": 6.758494251235274e-07,
      "logits/chosen": -2.288930654525757,
      "logits/rejected": -2.2800326347351074,
      "logps/chosen": -252.2129364013672,
      "logps/rejected": -242.2833251953125,
      "loss": 0.2442,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 1.06269371509552,
      "rewards/margins": 3.869558334350586,
      "rewards/rejected": -2.8068645000457764,
      "step": 160
    },
    {
      "epoch": 1.233644859813084,
      "grad_norm": 18.48336670058505,
      "learning_rate": 6.568109149672496e-07,
      "logits/chosen": -2.2865896224975586,
      "logits/rejected": -2.2526893615722656,
      "logps/chosen": -253.0467529296875,
      "logps/rejected": -230.2500762939453,
      "loss": 0.2703,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 1.3167855739593506,
      "rewards/margins": 3.570039749145508,
      "rewards/rejected": -2.2532541751861572,
      "step": 165
    },
    {
      "epoch": 1.2710280373831775,
      "grad_norm": 25.61953738965752,
      "learning_rate": 6.375167477293648e-07,
      "logits/chosen": -2.304429292678833,
      "logits/rejected": -2.293348789215088,
      "logps/chosen": -239.5231170654297,
      "logps/rejected": -229.12246704101562,
      "loss": 0.2725,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 1.3146635293960571,
      "rewards/margins": 3.4736862182617188,
      "rewards/rejected": -2.159022808074951,
      "step": 170
    },
    {
      "epoch": 1.308411214953271,
      "grad_norm": 25.42815054266923,
      "learning_rate": 6.179983797054321e-07,
      "logits/chosen": -2.3991851806640625,
      "logits/rejected": -2.3383266925811768,
      "logps/chosen": -236.6082000732422,
      "logps/rejected": -269.2547607421875,
      "loss": 0.2646,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 0.4701949954032898,
      "rewards/margins": 4.1486945152282715,
      "rewards/rejected": -3.6785004138946533,
      "step": 175
    },
    {
      "epoch": 1.3457943925233644,
      "grad_norm": 25.323140928512764,
      "learning_rate": 5.982876327173427e-07,
      "logits/chosen": -2.4732565879821777,
      "logits/rejected": -2.43239164352417,
      "logps/chosen": -274.3072814941406,
      "logps/rejected": -239.09585571289062,
      "loss": 0.3069,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": 0.0698857456445694,
      "rewards/margins": 3.551178455352783,
      "rewards/rejected": -3.481292247772217,
      "step": 180
    },
    {
      "epoch": 1.3831775700934579,
      "grad_norm": 20.834596330130136,
      "learning_rate": 5.78416642232531e-07,
      "logits/chosen": -2.4427692890167236,
      "logits/rejected": -2.4098360538482666,
      "logps/chosen": -258.0860900878906,
      "logps/rejected": -243.33206176757812,
      "loss": 0.2444,
      "rewards/accuracies": 0.875,
      "rewards/chosen": 0.3224690556526184,
      "rewards/margins": 3.5159637928009033,
      "rewards/rejected": -3.1934947967529297,
      "step": 185
    },
    {
      "epoch": 1.4205607476635513,
      "grad_norm": 18.851032795980213,
      "learning_rate": 5.584178049718314e-07,
      "logits/chosen": -2.4080841541290283,
      "logits/rejected": -2.3888661861419678,
      "logps/chosen": -242.3834686279297,
      "logps/rejected": -247.5576629638672,
      "loss": 0.2604,
      "rewards/accuracies": 0.875,
      "rewards/chosen": 0.38662514090538025,
      "rewards/margins": 3.3747589588165283,
      "rewards/rejected": -2.988133668899536,
      "step": 190
    },
    {
      "epoch": 1.4579439252336448,
      "grad_norm": 24.324007820476073,
      "learning_rate": 5.38323726091401e-07,
      "logits/chosen": -2.436607837677002,
      "logits/rejected": -2.391335964202881,
      "logps/chosen": -250.54647827148438,
      "logps/rejected": -239.52590942382812,
      "loss": 0.3042,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.005544149782508612,
      "rewards/margins": 3.300248384475708,
      "rewards/rejected": -3.3057925701141357,
      "step": 195
    },
    {
      "epoch": 1.4953271028037383,
      "grad_norm": 21.752837131187086,
      "learning_rate": 5.181671660248178e-07,
      "logits/chosen": -2.4886248111724854,
      "logits/rejected": -2.4703927040100098,
      "logps/chosen": -239.51376342773438,
      "logps/rejected": -247.77114868164062,
      "loss": 0.2571,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": -0.03603418171405792,
      "rewards/margins": 3.4513862133026123,
      "rewards/rejected": -3.4874203205108643,
      "step": 200
    },
    {
      "epoch": 1.4953271028037383,
      "eval_logits/chosen": -2.5015809535980225,
      "eval_logits/rejected": -2.475208282470703,
      "eval_logps/chosen": -254.51597595214844,
      "eval_logps/rejected": -249.1765594482422,
      "eval_loss": 0.5108085870742798,
      "eval_rewards/accuracies": 0.7791666388511658,
      "eval_rewards/chosen": -0.5979260802268982,
      "eval_rewards/margins": 2.482844591140747,
      "eval_rewards/rejected": -3.080770254135132,
      "eval_runtime": 252.5989,
      "eval_samples_per_second": 15.04,
      "eval_steps_per_second": 0.238,
      "step": 200
    },
    {
      "epoch": 1.5327102803738317,
      "grad_norm": 22.788597725922575,
      "learning_rate": 4.979809870720242e-07,
      "logits/chosen": -2.5105209350585938,
      "logits/rejected": -2.48913836479187,
      "logps/chosen": -252.13818359375,
      "logps/rejected": -243.4073944091797,
      "loss": 0.2725,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": -0.011826371774077415,
      "rewards/margins": 3.6809163093566895,
      "rewards/rejected": -3.6927428245544434,
      "step": 205
    },
    {
      "epoch": 1.5700934579439252,
      "grad_norm": 22.065080050736203,
      "learning_rate": 4.777980998221901e-07,
      "logits/chosen": -2.4797415733337402,
      "logits/rejected": -2.4482269287109375,
      "logps/chosen": -224.90585327148438,
      "logps/rejected": -238.3304443359375,
      "loss": 0.283,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -0.0005477949744090438,
      "rewards/margins": 3.547239303588867,
      "rewards/rejected": -3.5477871894836426,
      "step": 210
    },
    {
      "epoch": 1.6074766355140186,
      "grad_norm": 24.51095857605576,
      "learning_rate": 4.5765140949784923e-07,
      "logits/chosen": -2.4635329246520996,
      "logits/rejected": -2.4240822792053223,
      "logps/chosen": -256.9695739746094,
      "logps/rejected": -243.4046630859375,
      "loss": 0.3058,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": 0.43238821625709534,
      "rewards/margins": 3.786862850189209,
      "rewards/rejected": -3.3544750213623047,
      "step": 215
    },
    {
      "epoch": 1.644859813084112,
      "grad_norm": 20.419017482003486,
      "learning_rate": 4.3757376230778383e-07,
      "logits/chosen": -2.4464869499206543,
      "logits/rejected": -2.394662618637085,
      "logps/chosen": -249.76754760742188,
      "logps/rejected": -256.5289306640625,
      "loss": 0.2327,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 0.8897647857666016,
      "rewards/margins": 4.213340759277344,
      "rewards/rejected": -3.3235764503479004,
      "step": 220
    },
    {
      "epoch": 1.6822429906542056,
      "grad_norm": 53.385379065169666,
      "learning_rate": 4.1759789189612333e-07,
      "logits/chosen": -2.394674777984619,
      "logits/rejected": -2.3603250980377197,
      "logps/chosen": -253.1683349609375,
      "logps/rejected": -259.30975341796875,
      "loss": 0.2888,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 0.5363051891326904,
      "rewards/margins": 3.8216965198516846,
      "rewards/rejected": -3.285390853881836,
      "step": 225
    },
    {
      "epoch": 1.719626168224299,
      "grad_norm": 19.461317653850365,
      "learning_rate": 3.9775636597496285e-07,
      "logits/chosen": -2.3545804023742676,
      "logits/rejected": -2.356778621673584,
      "logps/chosen": -246.68734741210938,
      "logps/rejected": -241.13998413085938,
      "loss": 0.2588,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": 0.10117466747760773,
      "rewards/margins": 3.4182426929473877,
      "rewards/rejected": -3.317068099975586,
      "step": 230
    },
    {
      "epoch": 1.7570093457943925,
      "grad_norm": 22.752036394982305,
      "learning_rate": 3.7808153322750893e-07,
      "logits/chosen": -2.379742383956909,
      "logits/rejected": -2.3354058265686035,
      "logps/chosen": -243.09249877929688,
      "logps/rejected": -249.0592498779297,
      "loss": 0.2849,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 0.22115659713745117,
      "rewards/margins": 3.8574492931365967,
      "rewards/rejected": -3.6362926959991455,
      "step": 235
    },
    {
      "epoch": 1.794392523364486,
      "grad_norm": 25.661272927251265,
      "learning_rate": 3.586054705683208e-07,
      "logits/chosen": -2.387585163116455,
      "logits/rejected": -2.333857774734497,
      "logps/chosen": -273.67315673828125,
      "logps/rejected": -271.0914306640625,
      "loss": 0.2909,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": 0.6078623533248901,
      "rewards/margins": 3.6751747131347656,
      "rewards/rejected": -3.0673117637634277,
      "step": 240
    },
    {
      "epoch": 1.8317757009345794,
      "grad_norm": 25.180396150597772,
      "learning_rate": 3.393599308466285e-07,
      "logits/chosen": -2.401310920715332,
      "logits/rejected": -2.336974620819092,
      "logps/chosen": -256.4820251464844,
      "logps/rejected": -246.9813232421875,
      "loss": 0.282,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": 0.10372234880924225,
      "rewards/margins": 3.3294150829315186,
      "rewards/rejected": -3.2256927490234375,
      "step": 245
    },
    {
      "epoch": 1.8691588785046729,
      "grad_norm": 18.724955408080817,
      "learning_rate": 3.203762910779944e-07,
      "logits/chosen": -2.404759645462036,
      "logits/rejected": -2.3678829669952393,
      "logps/chosen": -253.46896362304688,
      "logps/rejected": -234.8633575439453,
      "loss": 0.2803,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": 0.2244938164949417,
      "rewards/margins": 3.192117214202881,
      "rewards/rejected": -2.967623233795166,
      "step": 250
    },
    {
      "epoch": 1.8691588785046729,
      "eval_logits/chosen": -2.4107203483581543,
      "eval_logits/rejected": -2.3852570056915283,
      "eval_logps/chosen": -251.44602966308594,
      "eval_logps/rejected": -245.23483276367188,
      "eval_loss": 0.48170700669288635,
      "eval_rewards/accuracies": 0.7854166626930237,
      "eval_rewards/chosen": -0.2909303605556488,
      "eval_rewards/margins": 2.3956663608551025,
      "eval_rewards/rejected": -2.686596632003784,
      "eval_runtime": 252.2818,
      "eval_samples_per_second": 15.059,
      "eval_steps_per_second": 0.238,
      "step": 250
    },
    {
      "epoch": 1.9065420560747663,
      "grad_norm": 29.924010084948176,
      "learning_rate": 3.0168550128871264e-07,
      "logits/chosen": -2.4268598556518555,
      "logits/rejected": -2.3924014568328857,
      "logps/chosen": -237.61965942382812,
      "logps/rejected": -245.6161346435547,
      "loss": 0.2684,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": 0.2175990641117096,
      "rewards/margins": 3.640658140182495,
      "rewards/rejected": -3.4230587482452393,
      "step": 255
    },
    {
      "epoch": 1.9439252336448598,
      "grad_norm": 26.71214593633519,
      "learning_rate": 2.833180340563554e-07,
      "logits/chosen": -2.425642728805542,
      "logits/rejected": -2.399965763092041,
      "logps/chosen": -231.1629180908203,
      "logps/rejected": -250.3664093017578,
      "loss": 0.2788,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": 0.05841640383005142,
      "rewards/margins": 3.254362106323242,
      "rewards/rejected": -3.1959457397460938,
      "step": 260
    },
    {
      "epoch": 1.9813084112149533,
      "grad_norm": 23.089942965590083,
      "learning_rate": 2.653038348287261e-07,
      "logits/chosen": -2.4249393939971924,
      "logits/rejected": -2.3924624919891357,
      "logps/chosen": -267.17547607421875,
      "logps/rejected": -267.37066650390625,
      "loss": 0.2624,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": 0.4873844087123871,
      "rewards/margins": 3.554990291595459,
      "rewards/rejected": -3.0676064491271973,
      "step": 265
    },
    {
      "epoch": 2.0186915887850465,
      "grad_norm": 24.317059270551702,
      "learning_rate": 2.476722731022207e-07,
      "logits/chosen": -2.397399663925171,
      "logits/rejected": -2.3522555828094482,
      "logps/chosen": -270.63812255859375,
      "logps/rejected": -237.6252899169922,
      "loss": 0.2462,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 0.7779295444488525,
      "rewards/margins": 4.320176601409912,
      "rewards/rejected": -3.5422470569610596,
      "step": 270
    },
    {
      "epoch": 2.05607476635514,
      "grad_norm": 24.99157167857996,
      "learning_rate": 2.3045209453919407e-07,
      "logits/chosen": -2.3702712059020996,
      "logits/rejected": -2.362605571746826,
      "logps/chosen": -238.41159057617188,
      "logps/rejected": -242.57275390625,
      "loss": 0.1566,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": 0.2242518663406372,
      "rewards/margins": 3.7089030742645264,
      "rewards/rejected": -3.4846510887145996,
      "step": 275
    },
    {
      "epoch": 2.0934579439252334,
      "grad_norm": 19.242571139212618,
      "learning_rate": 2.13671374102394e-07,
      "logits/chosen": -2.362250804901123,
      "logits/rejected": -2.343247175216675,
      "logps/chosen": -276.24969482421875,
      "logps/rejected": -272.6140441894531,
      "loss": 0.1735,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": 0.516792893409729,
      "rewards/margins": 4.325055122375488,
      "rewards/rejected": -3.808262348175049,
      "step": 280
    },
    {
      "epoch": 2.130841121495327,
      "grad_norm": 15.533308886613643,
      "learning_rate": 1.9735747028287342e-07,
      "logits/chosen": -2.3508124351501465,
      "logits/rejected": -2.330045700073242,
      "logps/chosen": -239.631103515625,
      "logps/rejected": -263.2986145019531,
      "loss": 0.1593,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 0.07615267485380173,
      "rewards/margins": 4.113415718078613,
      "rewards/rejected": -4.037262916564941,
      "step": 285
    },
    {
      "epoch": 2.1682242990654204,
      "grad_norm": 15.699144927979777,
      "learning_rate": 1.815369804960034e-07,
      "logits/chosen": -2.35709547996521,
      "logits/rejected": -2.3328442573547363,
      "logps/chosen": -244.9105987548828,
      "logps/rejected": -255.9078826904297,
      "loss": 0.1619,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": 0.3098369538784027,
      "rewards/margins": 4.1518449783325195,
      "rewards/rejected": -3.842008113861084,
      "step": 290
    },
    {
      "epoch": 2.205607476635514,
      "grad_norm": 20.58346646707518,
      "learning_rate": 1.6623569771830852e-07,
      "logits/chosen": -2.3732151985168457,
      "logits/rejected": -2.3346047401428223,
      "logps/chosen": -247.153564453125,
      "logps/rejected": -250.949951171875,
      "loss": 0.1841,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 0.4144899249076843,
      "rewards/margins": 3.991154909133911,
      "rewards/rejected": -3.5766654014587402,
      "step": 295
    },
    {
      "epoch": 2.2429906542056073,
      "grad_norm": 12.509918099598698,
      "learning_rate": 1.5147856843582002e-07,
      "logits/chosen": -2.349177360534668,
      "logits/rejected": -2.3184986114501953,
      "logps/chosen": -242.14108276367188,
      "logps/rejected": -242.0617218017578,
      "loss": 0.1739,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": 0.6371382474899292,
      "rewards/margins": 4.000860214233398,
      "rewards/rejected": -3.3637218475341797,
      "step": 300
    },
    {
      "epoch": 2.2429906542056073,
      "eval_logits/chosen": -2.355950355529785,
      "eval_logits/rejected": -2.3281238079071045,
      "eval_logps/chosen": -252.35202026367188,
      "eval_logps/rejected": -246.845947265625,
      "eval_loss": 0.49124717712402344,
      "eval_rewards/accuracies": 0.7916666865348816,
      "eval_rewards/chosen": -0.381531685590744,
      "eval_rewards/margins": 2.466177225112915,
      "eval_rewards/rejected": -2.8477089405059814,
      "eval_runtime": 252.5764,
      "eval_samples_per_second": 15.041,
      "eval_steps_per_second": 0.238,
      "step": 300
    },
    {
      "epoch": 2.2803738317757007,
      "grad_norm": 18.288756117670946,
      "learning_rate": 1.3728965197250781e-07,
      "logits/chosen": -2.35316801071167,
      "logits/rejected": -2.318678379058838,
      "logps/chosen": -256.4324645996094,
      "logps/rejected": -242.2520751953125,
      "loss": 0.1359,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 0.5630406141281128,
      "rewards/margins": 4.296410083770752,
      "rewards/rejected": -3.7333693504333496,
      "step": 305
    },
    {
      "epoch": 2.317757009345794,
      "grad_norm": 16.35995569615145,
      "learning_rate": 1.236920812651003e-07,
      "logits/chosen": -2.347511053085327,
      "logits/rejected": -2.3156919479370117,
      "logps/chosen": -234.4215545654297,
      "logps/rejected": -240.92129516601562,
      "loss": 0.1667,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": 0.38125258684158325,
      "rewards/margins": 4.210916042327881,
      "rewards/rejected": -3.8296637535095215,
      "step": 310
    },
    {
      "epoch": 2.3551401869158877,
      "grad_norm": 15.341078879044524,
      "learning_rate": 1.1070802514823913e-07,
      "logits/chosen": -2.3151564598083496,
      "logits/rejected": -2.291027307510376,
      "logps/chosen": -245.5782928466797,
      "logps/rejected": -246.9814453125,
      "loss": 0.1652,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 0.24629132449626923,
      "rewards/margins": 4.182533264160156,
      "rewards/rejected": -3.9362423419952393,
      "step": 315
    },
    {
      "epoch": 2.392523364485981,
      "grad_norm": 23.7922647773062,
      "learning_rate": 9.835865221146389e-08,
      "logits/chosen": -2.301703691482544,
      "logits/rejected": -2.290492296218872,
      "logps/chosen": -223.250732421875,
      "logps/rejected": -263.16534423828125,
      "loss": 0.1591,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": 0.41100868582725525,
      "rewards/margins": 4.717347145080566,
      "rewards/rejected": -4.306338310241699,
      "step": 320
    },
    {
      "epoch": 2.4299065420560746,
      "grad_norm": 16.439486015212957,
      "learning_rate": 8.666409628694693e-08,
      "logits/chosen": -2.319634199142456,
      "logits/rejected": -2.2700839042663574,
      "logps/chosen": -260.8030700683594,
      "logps/rejected": -255.59646606445312,
      "loss": 0.1595,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 0.4477875232696533,
      "rewards/margins": 4.375200271606445,
      "rewards/rejected": -3.927412748336792,
      "step": 325
    },
    {
      "epoch": 2.467289719626168,
      "grad_norm": 14.726547277918746,
      "learning_rate": 7.564342362424713e-08,
      "logits/chosen": -2.313271999359131,
      "logits/rejected": -2.2773654460906982,
      "logps/chosen": -262.1775817871094,
      "logps/rejected": -245.81216430664062,
      "loss": 0.1524,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": 0.36651816964149475,
      "rewards/margins": 4.084885597229004,
      "rewards/rejected": -3.7183678150177,
      "step": 330
    },
    {
      "epoch": 2.5046728971962615,
      "grad_norm": 11.804281276567055,
      "learning_rate": 6.53146018056011e-08,
      "logits/chosen": -2.328040599822998,
      "logits/rejected": -2.275287389755249,
      "logps/chosen": -258.7254638671875,
      "logps/rejected": -244.4686737060547,
      "loss": 0.1497,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 0.21398362517356873,
      "rewards/margins": 4.004579067230225,
      "rewards/rejected": -3.790595531463623,
      "step": 335
    },
    {
      "epoch": 2.542056074766355,
      "grad_norm": 17.23503560362695,
      "learning_rate": 5.569447045242931e-08,
      "logits/chosen": -2.288806676864624,
      "logits/rejected": -2.2881455421447754,
      "logps/chosen": -238.40414428710938,
      "logps/rejected": -256.07183837890625,
      "loss": 0.1615,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": 0.1942141056060791,
      "rewards/margins": 3.9404773712158203,
      "rewards/rejected": -3.7462635040283203,
      "step": 340
    },
    {
      "epoch": 2.5794392523364484,
      "grad_norm": 24.74669297879705,
      "learning_rate": 4.6798713770814625e-08,
      "logits/chosen": -2.3001134395599365,
      "logits/rejected": -2.2864573001861572,
      "logps/chosen": -238.6791534423828,
      "logps/rejected": -238.4395294189453,
      "loss": 0.1534,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": 0.4987949728965759,
      "rewards/margins": 4.1639580726623535,
      "rewards/rejected": -3.665163040161133,
      "step": 345
    },
    {
      "epoch": 2.616822429906542,
      "grad_norm": 18.596924543370314,
      "learning_rate": 3.864183498071699e-08,
      "logits/chosen": -2.3193628787994385,
      "logits/rejected": -2.2843828201293945,
      "logps/chosen": -253.653076171875,
      "logps/rejected": -278.3705139160156,
      "loss": 0.1631,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 0.44818204641342163,
      "rewards/margins": 4.706423759460449,
      "rewards/rejected": -4.258241176605225,
      "step": 350
    },
    {
      "epoch": 2.616822429906542,
      "eval_logits/chosen": -2.3091881275177,
      "eval_logits/rejected": -2.2783515453338623,
      "eval_logps/chosen": -252.63780212402344,
      "eval_logps/rejected": -248.45175170898438,
      "eval_loss": 0.49646082520484924,
      "eval_rewards/accuracies": 0.7895833253860474,
      "eval_rewards/chosen": -0.4101119339466095,
      "eval_rewards/margins": 2.5981762409210205,
      "eval_rewards/rejected": -3.0082881450653076,
      "eval_runtime": 251.9742,
      "eval_samples_per_second": 15.077,
      "eval_steps_per_second": 0.238,
      "step": 350
    },
    {
      "epoch": 2.6542056074766354,
      "grad_norm": 24.678306348653066,
      "learning_rate": 3.1237132670611455e-08,
      "logits/chosen": -2.3217742443084717,
      "logits/rejected": -2.285179853439331,
      "logps/chosen": -245.7552032470703,
      "logps/rejected": -277.97393798828125,
      "loss": 0.1623,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": 0.5618477463722229,
      "rewards/margins": 4.592735290527344,
      "rewards/rejected": -4.030888080596924,
      "step": 355
    },
    {
      "epoch": 2.691588785046729,
      "grad_norm": 18.396082804478937,
      "learning_rate": 2.4596679116099083e-08,
      "logits/chosen": -2.304999828338623,
      "logits/rejected": -2.295154094696045,
      "logps/chosen": -240.2224884033203,
      "logps/rejected": -248.5629425048828,
      "loss": 0.1735,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": 0.357688844203949,
      "rewards/margins": 3.9913132190704346,
      "rewards/rejected": -3.63362455368042,
      "step": 360
    },
    {
      "epoch": 2.7289719626168223,
      "grad_norm": 18.24918146544934,
      "learning_rate": 1.8731300597841837e-08,
      "logits/chosen": -2.304487943649292,
      "logits/rejected": -2.260864496231079,
      "logps/chosen": -234.8140106201172,
      "logps/rejected": -260.767578125,
      "loss": 0.161,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 0.49128788709640503,
      "rewards/margins": 4.2879767417907715,
      "rewards/rejected": -3.7966887950897217,
      "step": 365
    },
    {
      "epoch": 2.7663551401869158,
      "grad_norm": 26.438822654562358,
      "learning_rate": 1.365055975090773e-08,
      "logits/chosen": -2.3118393421173096,
      "logits/rejected": -2.2817904949188232,
      "logps/chosen": -252.44638061523438,
      "logps/rejected": -268.03570556640625,
      "loss": 0.1686,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 0.5989893078804016,
      "rewards/margins": 4.2095770835876465,
      "rewards/rejected": -3.6105875968933105,
      "step": 370
    },
    {
      "epoch": 2.803738317757009,
      "grad_norm": 16.398293030996605,
      "learning_rate": 9.362739974303757e-09,
      "logits/chosen": -2.335395097732544,
      "logits/rejected": -2.2768070697784424,
      "logps/chosen": -243.970703125,
      "logps/rejected": -267.5694580078125,
      "loss": 0.1565,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": 0.38310879468917847,
      "rewards/margins": 4.402381896972656,
      "rewards/rejected": -4.019272804260254,
      "step": 375
    },
    {
      "epoch": 2.8411214953271027,
      "grad_norm": 18.034442959036298,
      "learning_rate": 5.874831926114931e-09,
      "logits/chosen": -2.3112030029296875,
      "logits/rejected": -2.281367778778076,
      "logps/chosen": -240.7181854248047,
      "logps/rejected": -251.8282012939453,
      "loss": 0.1504,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 0.41617345809936523,
      "rewards/margins": 4.074304580688477,
      "rewards/rejected": -3.6581311225891113,
      "step": 380
    },
    {
      "epoch": 2.878504672897196,
      "grad_norm": 17.543650487814592,
      "learning_rate": 3.192522126266861e-09,
      "logits/chosen": -2.3081631660461426,
      "logits/rejected": -2.264885902404785,
      "logps/chosen": -244.57107543945312,
      "logps/rejected": -248.1013946533203,
      "loss": 0.1641,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": 0.41191452741622925,
      "rewards/margins": 4.37111759185791,
      "rewards/rejected": -3.9592037200927734,
      "step": 385
    },
    {
      "epoch": 2.9158878504672896,
      "grad_norm": 15.080570821768335,
      "learning_rate": 1.3201836854931924e-09,
      "logits/chosen": -2.3092455863952637,
      "logits/rejected": -2.2830567359924316,
      "logps/chosen": -241.13278198242188,
      "logps/rejected": -266.1209716796875,
      "loss": 0.1715,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": 0.32842570543289185,
      "rewards/margins": 4.37562370300293,
      "rewards/rejected": -4.047197341918945,
      "step": 390
    },
    {
      "epoch": 2.953271028037383,
      "grad_norm": 20.03660661880128,
      "learning_rate": 2.6086917562317957e-10,
      "logits/chosen": -2.3180956840515137,
      "logits/rejected": -2.277705669403076,
      "logps/chosen": -252.68295288085938,
      "logps/rejected": -274.5094909667969,
      "loss": 0.1565,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 0.44486260414123535,
      "rewards/margins": 4.4007062911987305,
      "rewards/rejected": -3.955843687057495,
      "step": 395
    },
    {
      "epoch": 2.983177570093458,
      "step": 399,
      "total_flos": 4704901791744000.0,
      "train_loss": 0.3356622438084213,
      "train_runtime": 15341.8197,
      "train_samples_per_second": 6.686,
      "train_steps_per_second": 0.026
    }
  ],
  "logging_steps": 5,
  "max_steps": 399,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4704901791744000.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
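Reading the reward fields: the names above match TRL-style DPO logging, where `rewards/chosen` and `rewards/rejected` are the implicit rewards β(log π_θ(y|x) − log π_ref(y|x)) of the preferred and dispreferred responses, `rewards/margins` is their difference, and `rewards/accuracies` is the fraction of pairs in the batch whose chosen response receives the higher reward.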
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5b35e0c261d5e2790d88034df43598e06af8b46ad9f569dd248ba94a5abea484
size 7096
training_eval_loss.png
ADDED
training_loss.png
ADDED
training_rewards_accuracies.png
ADDED
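The three PNGs above are rendered from the history stored in trainer_state.json, so they can be regenerated locally. A minimal sketch (file paths and the output filename are assumptions; requires matplotlib):

```python
import json

import matplotlib.pyplot as plt

# trainer_state.json carries the full history under "log_history":
# training records have "loss", eval records have "eval_loss".
with open("trainer_state.json") as f:
    state = json.load(f)

train = [e for e in state["log_history"] if "loss" in e]
evals = [e for e in state["log_history"] if "eval_loss" in e]

plt.plot([e["step"] for e in train], [e["loss"] for e in train], label="train loss")
plt.plot([e["step"] for e in evals], [e["eval_loss"] for e in evals], "o-", label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.savefig("training_loss_reproduced.png")
```

The same pattern with `rewards/accuracies` on the y-axis reproduces training_rewards_accuracies.png.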