chchen committed
Commit 6011915 · verified · 1 Parent(s): 32eb4a4

End of training

README.md CHANGED
@@ -3,9 +3,10 @@ base_model: mistralai/Mistral-Nemo-Instruct-2407
 library_name: peft
 license: apache-2.0
 tags:
+- llama-factory
+- lora
 - trl
 - dpo
-- llama-factory
 - generated_from_trainer
 model-index:
 - name: Mistral-Nemo-12B-Instruct-SAA-Half
@@ -17,7 +18,19 @@ should probably proofread and complete it, then remove this comment. -->
 
 # Mistral-Nemo-12B-Instruct-SAA-Half
 
-This model is a fine-tuned version of [mistralai/Mistral-Nemo-Instruct-2407](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407) on an unknown dataset.
+This model is a fine-tuned version of [mistralai/Mistral-Nemo-Instruct-2407](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407) on the bct_non_cot_dpo_500 dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.5932
+- Rewards/chosen: -0.0554
+- Rewards/rejected: -0.0900
+- Rewards/accuracies: 0.7800
+- Rewards/margins: 0.0346
+- Logps/rejected: -0.9001
+- Logps/chosen: -0.5538
+- Logits/rejected: -2.3124
+- Logits/chosen: -2.2862
+- Sft Loss: 0.0563
+- Odds Ratio Loss: 5.3690
 
 ## Model description
 
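The card above tags this repository as a PEFT/LoRA adapter for mistralai/Mistral-Nemo-Instruct-2407 trained with TRL/LLaMA-Factory DPO. A minimal loading sketch (not part of this commit) might look like the following; the adapter repo id is an assumption inferred from the committer and model name, not stated in the diff.

```python
# Minimal sketch: load the base model and attach the LoRA adapter from this repo.
# The adapter id below is an ASSUMPTION (committer/model name), not taken from the commit.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "mistralai/Mistral-Nemo-Instruct-2407"
adapter_id = "chchen/Mistral-Nemo-12B-Instruct-SAA-Half"  # hypothetical repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype="auto", device_map="auto")
model = PeftModel.from_pretrained(model, adapter_id)  # apply the DPO-trained LoRA weights

messages = [{"role": "user", "content": "Hello!"}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
outputs = model.generate(inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```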
all_results.json ADDED
@@ -0,0 +1,22 @@
+{
+  "epoch": 2.986666666666667,
+  "eval_logits/chosen": -2.286210060119629,
+  "eval_logits/rejected": -2.3124423027038574,
+  "eval_logps/chosen": -0.5537902116775513,
+  "eval_logps/rejected": -0.900057315826416,
+  "eval_loss": 0.5931902527809143,
+  "eval_odds_ratio_loss": 5.369002819061279,
+  "eval_rewards/accuracies": 0.7799999713897705,
+  "eval_rewards/chosen": -0.055379029363393784,
+  "eval_rewards/margins": 0.034626711159944534,
+  "eval_rewards/rejected": -0.09000573307275772,
+  "eval_runtime": 2.8075,
+  "eval_samples_per_second": 17.809,
+  "eval_sft_loss": 0.056289929896593094,
+  "eval_steps_per_second": 8.905,
+  "total_flos": 2.2783332409344e+16,
+  "train_loss": 0.8738818849836077,
+  "train_runtime": 261.5721,
+  "train_samples_per_second": 5.161,
+  "train_steps_per_second": 0.321
+}
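The evaluation metrics above look internally consistent with an ORPO-style objective in which the reported loss is the SFT loss plus a scaled odds-ratio loss, and the rewards are scaled log-probabilities. The 0.1 scaling factor used below is an inference from the numbers, not something stated in the commit; a quick check against the committed all_results.json:

```python
# Sanity check of the metric relationships (ASSUMPTION: beta = 0.1, inferred from the values).
# Hypothesized ORPO-style decomposition: loss = sft_loss + beta * odds_ratio_loss,
# rewards = beta * logps.
import json

with open("all_results.json") as f:
    m = json.load(f)

beta = 0.1  # assumed scaling factor
print(m["eval_sft_loss"] + beta * m["eval_odds_ratio_loss"])  # ~0.5932, matches eval_loss
print(beta * m["eval_logps/chosen"])                          # ~-0.0554, matches eval_rewards/chosen
```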
eval_results.json ADDED
@@ -0,0 +1,17 @@
+{
+  "epoch": 2.986666666666667,
+  "eval_logits/chosen": -2.286210060119629,
+  "eval_logits/rejected": -2.3124423027038574,
+  "eval_logps/chosen": -0.5537902116775513,
+  "eval_logps/rejected": -0.900057315826416,
+  "eval_loss": 0.5931902527809143,
+  "eval_odds_ratio_loss": 5.369002819061279,
+  "eval_rewards/accuracies": 0.7799999713897705,
+  "eval_rewards/chosen": -0.055379029363393784,
+  "eval_rewards/margins": 0.034626711159944534,
+  "eval_rewards/rejected": -0.09000573307275772,
+  "eval_runtime": 2.8075,
+  "eval_samples_per_second": 17.809,
+  "eval_sft_loss": 0.056289929896593094,
+  "eval_steps_per_second": 8.905
+}
train_results.json ADDED
@@ -0,0 +1,8 @@
+{
+  "epoch": 2.986666666666667,
+  "total_flos": 2.2783332409344e+16,
+  "train_loss": 0.8738818849836077,
+  "train_runtime": 261.5721,
+  "train_samples_per_second": 5.161,
+  "train_steps_per_second": 0.321
+}
trainer_state.json ADDED
@@ -0,0 +1,178 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 2.986666666666667,
+  "eval_steps": 500,
+  "global_step": 84,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.35555555555555557,
+      "grad_norm": 4.364832401275635,
+      "learning_rate": 4.997807075247147e-06,
+      "logits/chosen": -2.3599021434783936,
+      "logits/rejected": -2.3986287117004395,
+      "logps/chosen": -1.2810232639312744,
+      "logps/rejected": -1.6647472381591797,
+      "loss": 1.334,
+      "odds_ratio_loss": 11.882925987243652,
+      "rewards/accuracies": 0.7875000238418579,
+      "rewards/chosen": -0.12810233235359192,
+      "rewards/margins": 0.03837240859866142,
+      "rewards/rejected": -0.16647472977638245,
+      "sft_loss": 0.14570708572864532,
+      "step": 10
+    },
+    {
+      "epoch": 0.7111111111111111,
+      "grad_norm": 3.797199010848999,
+      "learning_rate": 4.7392794005985324e-06,
+      "logits/chosen": -2.176560878753662,
+      "logits/rejected": -2.2147622108459473,
+      "logps/chosen": -1.1564085483551025,
+      "logps/rejected": -1.6085771322250366,
+      "loss": 1.2054,
+      "odds_ratio_loss": 10.778497695922852,
+      "rewards/accuracies": 0.8062499761581421,
+      "rewards/chosen": -0.11564085632562637,
+      "rewards/margins": 0.04521685466170311,
+      "rewards/rejected": -0.160857692360878,
+      "sft_loss": 0.1275218427181244,
+      "step": 20
+    },
+    {
+      "epoch": 1.0666666666666667,
+      "grad_norm": 3.668826103210449,
+      "learning_rate": 4.093559974371725e-06,
+      "logits/chosen": -2.3584961891174316,
+      "logits/rejected": -2.3714914321899414,
+      "logps/chosen": -1.0279743671417236,
+      "logps/rejected": -1.439477562904358,
+      "loss": 1.0773,
+      "odds_ratio_loss": 9.591609001159668,
+      "rewards/accuracies": 0.8062499761581421,
+      "rewards/chosen": -0.10279743373394012,
+      "rewards/margins": 0.041150324046611786,
+      "rewards/rejected": -0.1439477652311325,
+      "sft_loss": 0.11812162399291992,
+      "step": 30
+    },
+    {
+      "epoch": 1.4222222222222223,
+      "grad_norm": 2.7432360649108887,
+      "learning_rate": 3.1722995515381644e-06,
+      "logits/chosen": -2.2854158878326416,
+      "logits/rejected": -2.3222243785858154,
+      "logps/chosen": -0.8480159640312195,
+      "logps/rejected": -1.3128085136413574,
+      "loss": 0.8927,
+      "odds_ratio_loss": 8.068161010742188,
+      "rewards/accuracies": 0.8187500238418579,
+      "rewards/chosen": -0.08480159938335419,
+      "rewards/margins": 0.0464792437851429,
+      "rewards/rejected": -0.1312808394432068,
+      "sft_loss": 0.08589236438274384,
+      "step": 40
+    },
+    {
+      "epoch": 1.7777777777777777,
+      "grad_norm": 2.6973206996917725,
+      "learning_rate": 2.134792428593971e-06,
+      "logits/chosen": -2.3095250129699707,
+      "logits/rejected": -2.336907386779785,
+      "logps/chosen": -0.6922627687454224,
+      "logps/rejected": -1.121113896369934,
+      "loss": 0.7399,
+      "odds_ratio_loss": 6.732758522033691,
+      "rewards/accuracies": 0.7875000238418579,
+      "rewards/chosen": -0.06922627240419388,
+      "rewards/margins": 0.042885102331638336,
+      "rewards/rejected": -0.11211137473583221,
+      "sft_loss": 0.06660701334476471,
+      "step": 50
+    },
+    {
+      "epoch": 2.1333333333333333,
+      "grad_norm": 1.8098381757736206,
+      "learning_rate": 1.160433012552508e-06,
+      "logits/chosen": -2.3899528980255127,
+      "logits/rejected": -2.4369609355926514,
+      "logps/chosen": -0.5784354209899902,
+      "logps/rejected": -1.0445263385772705,
+      "loss": 0.63,
+      "odds_ratio_loss": 5.705307960510254,
+      "rewards/accuracies": 0.7875000238418579,
+      "rewards/chosen": -0.05784354358911514,
+      "rewards/margins": 0.04660910367965698,
+      "rewards/rejected": -0.10445265471935272,
+      "sft_loss": 0.05942065268754959,
+      "step": 60
+    },
+    {
+      "epoch": 2.488888888888889,
+      "grad_norm": 2.6440157890319824,
+      "learning_rate": 4.1769689822475147e-07,
+      "logits/chosen": -2.3521196842193604,
+      "logits/rejected": -2.39290189743042,
+      "logps/chosen": -0.5662254095077515,
+      "logps/rejected": -1.0197746753692627,
+      "loss": 0.6132,
+      "odds_ratio_loss": 5.592169761657715,
+      "rewards/accuracies": 0.8062499761581421,
+      "rewards/chosen": -0.056622546166181564,
+      "rewards/margins": 0.045354925096035004,
+      "rewards/rejected": -0.10197745263576508,
+      "sft_loss": 0.05396850034594536,
+      "step": 70
+    },
+    {
+      "epoch": 2.8444444444444446,
+      "grad_norm": 2.6655380725860596,
+      "learning_rate": 3.5009907323737826e-08,
+      "logits/chosen": -2.3952314853668213,
+      "logits/rejected": -2.4219796657562256,
+      "logps/chosen": -0.5315676927566528,
+      "logps/rejected": -0.9738686680793762,
+      "loss": 0.5745,
+      "odds_ratio_loss": 5.228327751159668,
+      "rewards/accuracies": 0.8062499761581421,
+      "rewards/chosen": -0.053156763315200806,
+      "rewards/margins": 0.044230107218027115,
+      "rewards/rejected": -0.09738686680793762,
+      "sft_loss": 0.05164428427815437,
+      "step": 80
+    },
+    {
+      "epoch": 2.986666666666667,
+      "step": 84,
+      "total_flos": 2.2783332409344e+16,
+      "train_loss": 0.8738818849836077,
+      "train_runtime": 261.5721,
+      "train_samples_per_second": 5.161,
+      "train_steps_per_second": 0.321
+    }
+  ],
+  "logging_steps": 10,
+  "max_steps": 84,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 2.2783332409344e+16,
+  "train_batch_size": 2,
+  "trial_name": null,
+  "trial_params": null
+}
training_loss.png ADDED
training_rewards_accuracies.png ADDED
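The two added PNGs presumably visualize the loss and rewards/accuracies columns of the log_history above; the plotting code itself is not part of the commit. A minimal sketch that regenerates similar curves from the committed trainer_state.json (the actual images may have been produced differently):

```python
# Minimal sketch (assumption, not from the commit): rebuild curves resembling the
# two added PNGs from the per-step entries in trainer_state.json.
import json
import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

logs = [e for e in state["log_history"] if "loss" in e]  # keep per-step logging entries
steps = [e["step"] for e in logs]

for key, fname in [("loss", "training_loss.png"),
                   ("rewards/accuracies", "training_rewards_accuracies.png")]:
    plt.figure()
    plt.plot(steps, [e[key] for e in logs])
    plt.xlabel("step")
    plt.ylabel(key)
    plt.savefig(fname)  # filenames match the committed images; content may differ
```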