chenggong1995 commited on
Commit
46bc97a
·
verified ·
1 Parent(s): bc68686

Model save

Browse files
README.md ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: allenai/OLMoE-1B-7B-0125-Instruct
3
+ library_name: transformers
4
+ model_name: OLMoE-1B-7B-0125-Instruct-grpo-E6-D8000-L4096
5
+ tags:
6
+ - generated_from_trainer
7
+ - trl
8
+ - grpo
9
+ license: license
10
+ ---
11
+
12
+ # Model Card for OLMoE-1B-7B-0125-Instruct-grpo-E6-D8000-L4096
13
+
14
+ This model is a fine-tuned version of [allenai/OLMoE-1B-7B-0125-Instruct](https://huggingface.co/allenai/OLMoE-1B-7B-0125-Instruct).
15
+ It has been trained using [TRL](https://github.com/huggingface/trl).
16
+
17
+ ## Quick start
18
+
19
+ ```python
20
+ from transformers import pipeline
21
+
22
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
23
+ generator = pipeline("text-generation", model="chenggong1995/OLMoE-1B-7B-0125-Instruct-grpo-E6-D8000-L4096", device="cuda")
24
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
25
+ print(output["generated_text"])
26
+ ```
27
+
28
+ ## Training procedure
29
+
30
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/gongc1995-city-university-of-hong-kong/huggingface/runs/z7mkjfwj)
31
+
32
+
33
+ This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
34
+
35
+ ### Framework versions
36
+
37
+ - TRL: 0.16.0.dev0
38
+ - Transformers: 4.49.0
39
+ - Pytorch: 2.5.1
40
+ - Datasets: 3.3.2
41
+ - Tokenizers: 0.21.0
42
+
43
+ ## Citations
44
+
45
+ Cite GRPO as:
46
+
47
+ ```bibtex
48
+ @article{zhihong2024deepseekmath,
49
+ title = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
50
+ author = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
51
+ year = 2024,
52
+ eprint = {arXiv:2402.03300},
53
+ }
54
+
55
+ ```
56
+
57
+ Cite TRL as:
58
+
59
+ ```bibtex
60
+ @misc{vonwerra2022trl,
61
+ title = {{TRL: Transformer Reinforcement Learning}},
62
+ author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
63
+ year = 2020,
64
+ journal = {GitHub repository},
65
+ publisher = {GitHub},
66
+ howpublished = {\url{https://github.com/huggingface/trl}}
67
+ }
68
+ ```
all_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "total_flos": 0.0,
3
+ "train_loss": 0.03362821891328426,
4
+ "train_runtime": 77437.8148,
5
+ "train_samples": 8000,
6
+ "train_samples_per_second": 0.62,
7
+ "train_steps_per_second": 0.003
8
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "eos_token_id": 50279,
4
+ "pad_token_id": 1,
5
+ "transformers_version": "4.49.0"
6
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "total_flos": 0.0,
3
+ "train_loss": 0.03362821891328426,
4
+ "train_runtime": 77437.8148,
5
+ "train_samples": 8000,
6
+ "train_samples_per_second": 0.62,
7
+ "train_steps_per_second": 0.003
8
+ }
trainer_state.json ADDED
@@ -0,0 +1,751 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 5.984,
5
+ "eval_steps": 100,
6
+ "global_step": 246,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "clip_ratio": 0.0,
13
+ "completion_length": 583.2621688842773,
14
+ "epoch": 0.12,
15
+ "grad_norm": 0.12920165061950684,
16
+ "kl": 0.0007047017415364583,
17
+ "learning_rate": 4e-07,
18
+ "loss": 0.0039,
19
+ "reward": 0.07673611293236414,
20
+ "reward_std": 0.12897611850251753,
21
+ "rewards/accuracy_reward": 0.07100694642091791,
22
+ "rewards/format_reward": 0.0057291668374091385,
23
+ "step": 5
24
+ },
25
+ {
26
+ "clip_ratio": 0.0,
27
+ "completion_length": 576.849148050944,
28
+ "epoch": 0.24,
29
+ "grad_norm": 0.14590902626514435,
30
+ "kl": 0.0013418197631835938,
31
+ "learning_rate": 8e-07,
32
+ "loss": 0.0005,
33
+ "reward": 0.0871527800646921,
34
+ "reward_std": 0.13713855588187773,
35
+ "rewards/accuracy_reward": 0.081250002173086,
36
+ "rewards/format_reward": 0.005902777938172221,
37
+ "step": 10
38
+ },
39
+ {
40
+ "clip_ratio": 0.0,
41
+ "completion_length": 595.0527969360352,
42
+ "epoch": 0.36,
43
+ "grad_norm": 0.13242916762828827,
44
+ "kl": 0.0022638956705729168,
45
+ "learning_rate": 1.2e-06,
46
+ "loss": 0.0035,
47
+ "reward": 0.08645833590999245,
48
+ "reward_std": 0.12982427552342415,
49
+ "rewards/accuracy_reward": 0.07795139101023475,
50
+ "rewards/format_reward": 0.008506944651405017,
51
+ "step": 15
52
+ },
53
+ {
54
+ "clip_ratio": 0.0,
55
+ "completion_length": 589.0404724121094,
56
+ "epoch": 0.48,
57
+ "grad_norm": 0.19261103868484497,
58
+ "kl": 0.006444295247395833,
59
+ "learning_rate": 1.6e-06,
60
+ "loss": 0.0083,
61
+ "reward": 0.12013889234513045,
62
+ "reward_std": 0.17057897535463173,
63
+ "rewards/accuracy_reward": 0.10694444783342381,
64
+ "rewards/format_reward": 0.013194444729015231,
65
+ "step": 20
66
+ },
67
+ {
68
+ "clip_ratio": 0.0,
69
+ "completion_length": 600.2750193277994,
70
+ "epoch": 0.6,
71
+ "grad_norm": 0.18090881407260895,
72
+ "kl": 0.00933685302734375,
73
+ "learning_rate": 2e-06,
74
+ "loss": 0.011,
75
+ "reward": 0.12621528124436737,
76
+ "reward_std": 0.1751370935390393,
77
+ "rewards/accuracy_reward": 0.11458333623595536,
78
+ "rewards/format_reward": 0.011631944697971146,
79
+ "step": 25
80
+ },
81
+ {
82
+ "clip_ratio": 0.0,
83
+ "completion_length": 577.3231079101563,
84
+ "epoch": 0.72,
85
+ "grad_norm": 0.14372943341732025,
86
+ "kl": 0.009203084309895833,
87
+ "learning_rate": 1.9974751105436262e-06,
88
+ "loss": 0.0063,
89
+ "reward": 0.13611111496575176,
90
+ "reward_std": 0.17103372573231657,
91
+ "rewards/accuracy_reward": 0.1302083367947489,
92
+ "rewards/format_reward": 0.005902777938172221,
93
+ "step": 30
94
+ },
95
+ {
96
+ "clip_ratio": 0.0,
97
+ "completion_length": 564.1434224446615,
98
+ "epoch": 0.84,
99
+ "grad_norm": 0.15495018661022186,
100
+ "kl": 0.009105428059895834,
101
+ "learning_rate": 1.98991319230804e-06,
102
+ "loss": 0.0107,
103
+ "reward": 0.1550347256163756,
104
+ "reward_std": 0.19651179468880098,
105
+ "rewards/accuracy_reward": 0.14583333718279998,
106
+ "rewards/format_reward": 0.009201389101023475,
107
+ "step": 35
108
+ },
109
+ {
110
+ "clip_ratio": 0.0,
111
+ "completion_length": 600.4140803019205,
112
+ "epoch": 0.96,
113
+ "grad_norm": 0.17615483701229095,
114
+ "kl": 0.012515767415364584,
115
+ "learning_rate": 1.9773524313084854e-06,
116
+ "loss": 0.0119,
117
+ "reward": 0.15885417150954406,
118
+ "reward_std": 0.2019161203255256,
119
+ "rewards/accuracy_reward": 0.14166667039195696,
120
+ "rewards/format_reward": 0.017187500388051072,
121
+ "step": 40
122
+ },
123
+ {
124
+ "clip_ratio": 0.0,
125
+ "completion_length": 631.5385569254557,
126
+ "epoch": 1.096,
127
+ "grad_norm": 0.16588236391544342,
128
+ "kl": 0.014839680989583333,
129
+ "learning_rate": 1.959856256610988e-06,
130
+ "loss": 0.0096,
131
+ "reward": 0.18350694837669532,
132
+ "reward_std": 0.2348036120335261,
133
+ "rewards/accuracy_reward": 0.15277778275000553,
134
+ "rewards/format_reward": 0.030729167473812897,
135
+ "step": 45
136
+ },
137
+ {
138
+ "clip_ratio": 0.0,
139
+ "completion_length": 593.8875162760417,
140
+ "epoch": 1.216,
141
+ "grad_norm": 0.18021586537361145,
142
+ "kl": 0.016481526692708335,
143
+ "learning_rate": 1.9375130200295876e-06,
144
+ "loss": 0.0189,
145
+ "reward": 0.19756944961845874,
146
+ "reward_std": 0.24355731457471846,
147
+ "rewards/accuracy_reward": 0.16006944881131252,
148
+ "rewards/format_reward": 0.03750000107102096,
149
+ "step": 50
150
+ },
151
+ {
152
+ "clip_ratio": 0.0,
153
+ "completion_length": 587.4776229858398,
154
+ "epoch": 1.336,
155
+ "grad_norm": 0.1994011402130127,
156
+ "kl": 0.0164459228515625,
157
+ "learning_rate": 1.9104355499692162e-06,
158
+ "loss": 0.0176,
159
+ "reward": 0.2189236176510652,
160
+ "reward_std": 0.28772813665370145,
161
+ "rewards/accuracy_reward": 0.14583333767950535,
162
+ "rewards/format_reward": 0.07309028026647865,
163
+ "step": 55
164
+ },
165
+ {
166
+ "clip_ratio": 0.0,
167
+ "completion_length": 575.6356079101563,
168
+ "epoch": 1.456,
169
+ "grad_norm": 0.23497304320335388,
170
+ "kl": 0.02615966796875,
171
+ "learning_rate": 1.8787605816671951e-06,
172
+ "loss": 0.0222,
173
+ "reward": 0.33281251018246016,
174
+ "reward_std": 0.3787623425324758,
175
+ "rewards/accuracy_reward": 0.13923611460874477,
176
+ "rewards/format_reward": 0.19357639501492183,
177
+ "step": 60
178
+ },
179
+ {
180
+ "clip_ratio": 0.0,
181
+ "completion_length": 581.5133870442709,
182
+ "epoch": 1.576,
183
+ "grad_norm": 0.2583252191543579,
184
+ "kl": 0.039479573567708336,
185
+ "learning_rate": 1.8426480667105175e-06,
186
+ "loss": 0.0407,
187
+ "reward": 0.4626736263434092,
188
+ "reward_std": 0.45475957343975704,
189
+ "rewards/accuracy_reward": 0.13229167129223546,
190
+ "rewards/format_reward": 0.33038195346792537,
191
+ "step": 65
192
+ },
193
+ {
194
+ "clip_ratio": 0.0,
195
+ "completion_length": 575.3401219685873,
196
+ "epoch": 1.696,
197
+ "grad_norm": 0.2477940022945404,
198
+ "kl": 0.04772135416666667,
199
+ "learning_rate": 1.8022803653156982e-06,
200
+ "loss": 0.037,
201
+ "reward": 0.6152777954936027,
202
+ "reward_std": 0.4820722574989001,
203
+ "rewards/accuracy_reward": 0.11440972487131755,
204
+ "rewards/format_reward": 0.5008680661519368,
205
+ "step": 70
206
+ },
207
+ {
208
+ "clip_ratio": 0.0,
209
+ "completion_length": 592.2732818603515,
210
+ "epoch": 1.8159999999999998,
211
+ "grad_norm": 0.240928515791893,
212
+ "kl": 0.045609537760416666,
213
+ "learning_rate": 1.7578613254499968e-06,
214
+ "loss": 0.0367,
215
+ "reward": 0.6859375188748041,
216
+ "reward_std": 0.48161858022212983,
217
+ "rewards/accuracy_reward": 0.1130208361428231,
218
+ "rewards/format_reward": 0.5729166840513548,
219
+ "step": 75
220
+ },
221
+ {
222
+ "clip_ratio": 0.0,
223
+ "completion_length": 570.3640808105469,
224
+ "epoch": 1.936,
225
+ "grad_norm": 0.30649441480636597,
226
+ "kl": 0.05423177083333333,
227
+ "learning_rate": 1.7096152534442513e-06,
228
+ "loss": 0.0334,
229
+ "reward": 0.7704861332972844,
230
+ "reward_std": 0.4483942608038584,
231
+ "rewards/accuracy_reward": 0.10572916980211934,
232
+ "rewards/format_reward": 0.6647569671273231,
233
+ "step": 80
234
+ },
235
+ {
236
+ "clip_ratio": 0.0,
237
+ "completion_length": 560.8760559082032,
238
+ "epoch": 2.072,
239
+ "grad_norm": 0.25217875838279724,
240
+ "kl": 0.054768880208333336,
241
+ "learning_rate": 1.6577857812954991e-06,
242
+ "loss": 0.0375,
243
+ "reward": 0.8114583571751912,
244
+ "reward_std": 0.4467007691661517,
245
+ "rewards/accuracy_reward": 0.12256944736776253,
246
+ "rewards/format_reward": 0.6888889064391454,
247
+ "step": 85
248
+ },
249
+ {
250
+ "clip_ratio": 0.0,
251
+ "completion_length": 565.7517557779948,
252
+ "epoch": 2.192,
253
+ "grad_norm": 0.27332058548927307,
254
+ "kl": 0.0546630859375,
255
+ "learning_rate": 1.6026346363792564e-06,
256
+ "loss": 0.0329,
257
+ "reward": 0.8633680770794551,
258
+ "reward_std": 0.47069497853517533,
259
+ "rewards/accuracy_reward": 0.15156250428408385,
260
+ "rewards/format_reward": 0.711805577079455,
261
+ "step": 90
262
+ },
263
+ {
264
+ "clip_ratio": 0.0,
265
+ "completion_length": 564.6762344360352,
266
+ "epoch": 2.312,
267
+ "grad_norm": 0.22232523560523987,
268
+ "kl": 0.05868123372395833,
269
+ "learning_rate": 1.5444403197841344e-06,
270
+ "loss": 0.0211,
271
+ "reward": 0.8944444666306178,
272
+ "reward_std": 0.42260901977618537,
273
+ "rewards/accuracy_reward": 0.1461805594774584,
274
+ "rewards/format_reward": 0.7482639104127884,
275
+ "step": 95
276
+ },
277
+ {
278
+ "epoch": 2.432,
279
+ "grad_norm": 0.21494214236736298,
280
+ "learning_rate": 1.4834966999429178e-06,
281
+ "loss": 0.0271,
282
+ "step": 100
283
+ },
284
+ {
285
+ "epoch": 2.432,
286
+ "eval_clip_ratio": 0.0,
287
+ "eval_completion_length": 528.4134756234976,
288
+ "eval_kl": 0.060819185697115384,
289
+ "eval_loss": 0.013302656821906567,
290
+ "eval_reward": 0.97275644999284,
291
+ "eval_reward_std": 0.405555764069924,
292
+ "eval_rewards/accuracy_reward": 0.16025641531898424,
293
+ "eval_rewards/format_reward": 0.8125000275098361,
294
+ "eval_runtime": 164.4929,
295
+ "eval_samples_per_second": 0.602,
296
+ "eval_steps_per_second": 0.018,
297
+ "step": 100
298
+ },
299
+ {
300
+ "clip_ratio": 0.0,
301
+ "completion_length": 562.4533172607422,
302
+ "epoch": 2.552,
303
+ "grad_norm": 0.24463680386543274,
304
+ "kl": 0.06415608723958334,
305
+ "learning_rate": 1.4201115286619464e-06,
306
+ "loss": 0.0246,
307
+ "reward": 0.9512153029441833,
308
+ "reward_std": 0.4153951602677504,
309
+ "rewards/accuracy_reward": 0.16093750478078922,
310
+ "rewards/format_reward": 0.7902777964870135,
311
+ "step": 105
312
+ },
313
+ {
314
+ "clip_ratio": 0.0,
315
+ "completion_length": 558.5052256266276,
316
+ "epoch": 2.672,
317
+ "grad_norm": 0.22716720402240753,
318
+ "kl": 0.066162109375,
319
+ "learning_rate": 1.3546048870425354e-06,
320
+ "loss": 0.0242,
321
+ "reward": 0.9326389094193777,
322
+ "reward_std": 0.4124096731344859,
323
+ "rewards/accuracy_reward": 0.15625000453243654,
324
+ "rewards/format_reward": 0.7763889074325562,
325
+ "step": 110
326
+ },
327
+ {
328
+ "clip_ratio": 0.0,
329
+ "completion_length": 547.4661656697591,
330
+ "epoch": 2.792,
331
+ "grad_norm": 0.2325204312801361,
332
+ "kl": 0.07105712890625,
333
+ "learning_rate": 1.2873075691421806e-06,
334
+ "loss": 0.0198,
335
+ "reward": 0.9222222457329432,
336
+ "reward_std": 0.4309779698650042,
337
+ "rewards/accuracy_reward": 0.16579861616094907,
338
+ "rewards/format_reward": 0.7564236293236415,
339
+ "step": 115
340
+ },
341
+ {
342
+ "clip_ratio": 0.0,
343
+ "completion_length": 547.4175537109375,
344
+ "epoch": 2.912,
345
+ "grad_norm": 0.23773518204689026,
346
+ "kl": 0.06711832682291667,
347
+ "learning_rate": 1.218559411537699e-06,
348
+ "loss": 0.0211,
349
+ "reward": 0.894270858168602,
350
+ "reward_std": 0.41917893588542937,
351
+ "rewards/accuracy_reward": 0.15920139361793798,
352
+ "rewards/format_reward": 0.7350694666306178,
353
+ "step": 120
354
+ },
355
+ {
356
+ "clip_ratio": 0.0,
357
+ "completion_length": 544.4187637329102,
358
+ "epoch": 3.048,
359
+ "grad_norm": 0.25982293486595154,
360
+ "kl": 0.07174072265625,
361
+ "learning_rate": 1.1487075772256517e-06,
362
+ "loss": 0.029,
363
+ "reward": 0.8729166895151138,
364
+ "reward_std": 0.4223095287879308,
365
+ "rewards/accuracy_reward": 0.16006944874922435,
366
+ "rewards/format_reward": 0.7128472457329432,
367
+ "step": 125
368
+ },
369
+ {
370
+ "clip_ratio": 0.0,
371
+ "completion_length": 520.4824803670248,
372
+ "epoch": 3.168,
373
+ "grad_norm": 0.24558736383914948,
374
+ "kl": 0.08404541015625,
375
+ "learning_rate": 1.0781048025259646e-06,
376
+ "loss": 0.0261,
377
+ "reward": 0.8881944636503856,
378
+ "reward_std": 0.42243550966183346,
379
+ "rewards/accuracy_reward": 0.16284722611308097,
380
+ "rewards/format_reward": 0.725347238779068,
381
+ "step": 130
382
+ },
383
+ {
384
+ "clip_ratio": 0.0,
385
+ "completion_length": 537.1248448689779,
386
+ "epoch": 3.288,
387
+ "grad_norm": 0.29106763005256653,
388
+ "kl": 0.07824300130208334,
389
+ "learning_rate": 1.0071076158414974e-06,
390
+ "loss": 0.0249,
391
+ "reward": 0.9265625198682149,
392
+ "reward_std": 0.41332067002852757,
393
+ "rewards/accuracy_reward": 0.17378472685813903,
394
+ "rewards/format_reward": 0.7527777969837188,
395
+ "step": 135
396
+ },
397
+ {
398
+ "clip_ratio": 0.0,
399
+ "completion_length": 550.350016784668,
400
+ "epoch": 3.408,
401
+ "grad_norm": 0.2443486452102661,
402
+ "kl": 0.071923828125,
403
+ "learning_rate": 9.360745372684345e-07,
404
+ "loss": 0.0295,
405
+ "reward": 0.8960069666306177,
406
+ "reward_std": 0.4272393837571144,
407
+ "rewards/accuracy_reward": 0.17343750571211178,
408
+ "rewards/format_reward": 0.7225694636503855,
409
+ "step": 140
410
+ },
411
+ {
412
+ "clip_ratio": 0.0,
413
+ "completion_length": 543.1215454101563,
414
+ "epoch": 3.528,
415
+ "grad_norm": 0.24748581647872925,
416
+ "kl": 0.07239176432291666,
417
+ "learning_rate": 8.653642681490607e-07,
418
+ "loss": 0.0247,
419
+ "reward": 0.9074653029441834,
420
+ "reward_std": 0.4173097605506579,
421
+ "rewards/accuracy_reward": 0.1642361162851254,
422
+ "rewards/format_reward": 0.7432291815678279,
423
+ "step": 145
424
+ },
425
+ {
426
+ "clip_ratio": 0.0,
427
+ "completion_length": 539.6696355183919,
428
+ "epoch": 3.648,
429
+ "grad_norm": 0.2420652210712433,
430
+ "kl": 0.0806884765625,
431
+ "learning_rate": 7.953338797092901e-07,
432
+ "loss": 0.0247,
433
+ "reward": 0.9553819715976715,
434
+ "reward_std": 0.39353689054648083,
435
+ "rewards/accuracy_reward": 0.15902778361923992,
436
+ "rewards/format_reward": 0.7963541895151138,
437
+ "step": 150
438
+ },
439
+ {
440
+ "clip_ratio": 0.0,
441
+ "completion_length": 542.0953282674153,
442
+ "epoch": 3.768,
443
+ "grad_norm": 0.22927226126194,
444
+ "kl": 0.08297119140625,
445
+ "learning_rate": 7.263370099279171e-07,
446
+ "loss": 0.0375,
447
+ "reward": 0.9421875288089117,
448
+ "reward_std": 0.3737917934854825,
449
+ "rewards/accuracy_reward": 0.14618056000520785,
450
+ "rewards/format_reward": 0.7960069606701533,
451
+ "step": 155
452
+ },
453
+ {
454
+ "clip_ratio": 0.0,
455
+ "completion_length": 560.2560948689778,
456
+ "epoch": 3.888,
457
+ "grad_norm": 0.2442265897989273,
458
+ "kl": 0.08262532552083333,
459
+ "learning_rate": 6.587220777430095e-07,
460
+ "loss": 0.0438,
461
+ "reward": 0.9187500218550364,
462
+ "reward_std": 0.3985011622309685,
463
+ "rewards/accuracy_reward": 0.1730902827034394,
464
+ "rewards/format_reward": 0.7456597417593003,
465
+ "step": 160
466
+ },
467
+ {
468
+ "clip_ratio": 0.0,
469
+ "completion_length": 549.7503621419271,
470
+ "epoch": 4.024,
471
+ "grad_norm": 0.36080020666122437,
472
+ "kl": 0.09295247395833334,
473
+ "learning_rate": 5.928305236133016e-07,
474
+ "loss": 0.0348,
475
+ "reward": 0.8918403009573619,
476
+ "reward_std": 0.42846539815266926,
477
+ "rewards/accuracy_reward": 0.16701389451821644,
478
+ "rewards/format_reward": 0.7248264094193776,
479
+ "step": 165
480
+ },
481
+ {
482
+ "clip_ratio": 0.0,
483
+ "completion_length": 527.6090423583985,
484
+ "epoch": 4.144,
485
+ "grad_norm": 0.2888505756855011,
486
+ "kl": 0.0927978515625,
487
+ "learning_rate": 5.289950853193652e-07,
488
+ "loss": 0.0569,
489
+ "reward": 0.9263889143864313,
490
+ "reward_std": 0.4129257212082545,
491
+ "rewards/accuracy_reward": 0.17309028257926304,
492
+ "rewards/format_reward": 0.75329862733682,
493
+ "step": 170
494
+ },
495
+ {
496
+ "clip_ratio": 0.0,
497
+ "completion_length": 521.2873443603515,
498
+ "epoch": 4.264,
499
+ "grad_norm": 0.2377632111310959,
500
+ "kl": 0.09527180989583334,
501
+ "learning_rate": 4.6753811771138365e-07,
502
+ "loss": 0.0401,
503
+ "reward": 0.9312500258286794,
504
+ "reward_std": 0.3798152153690656,
505
+ "rewards/accuracy_reward": 0.16128472660978635,
506
+ "rewards/format_reward": 0.7699652989705403,
507
+ "step": 175
508
+ },
509
+ {
510
+ "clip_ratio": 0.0,
511
+ "completion_length": 545.0975886027019,
512
+ "epoch": 4.384,
513
+ "grad_norm": 0.2456796020269394,
514
+ "kl": 0.093505859375,
515
+ "learning_rate": 4.0876996488842475e-07,
516
+ "loss": 0.0539,
517
+ "reward": 0.9411458532015483,
518
+ "reward_std": 0.40113388895988467,
519
+ "rewards/accuracy_reward": 0.17621528282761573,
520
+ "rewards/format_reward": 0.7649305760860443,
521
+ "step": 180
522
+ },
523
+ {
524
+ "clip_ratio": 0.0,
525
+ "completion_length": 552.6007125854492,
526
+ "epoch": 4.504,
527
+ "grad_norm": 0.24820686876773834,
528
+ "kl": 0.09252522786458334,
529
+ "learning_rate": 3.529873930293545e-07,
530
+ "loss": 0.0572,
531
+ "reward": 0.9163194666306178,
532
+ "reward_std": 0.38814649879932406,
533
+ "rewards/accuracy_reward": 0.15885417160267631,
534
+ "rewards/format_reward": 0.7574652969837189,
535
+ "step": 185
536
+ },
537
+ {
538
+ "clip_ratio": 0.0,
539
+ "completion_length": 533.9510559082031,
540
+ "epoch": 4.624,
541
+ "grad_norm": 0.2605077922344208,
542
+ "kl": 0.09794921875,
543
+ "learning_rate": 3.0047209178924635e-07,
544
+ "loss": 0.0484,
545
+ "reward": 0.9178819686174393,
546
+ "reward_std": 0.4066275705893834,
547
+ "rewards/accuracy_reward": 0.16649306093653043,
548
+ "rewards/format_reward": 0.7513889104127884,
549
+ "step": 190
550
+ },
551
+ {
552
+ "clip_ratio": 0.0,
553
+ "completion_length": 535.4948069254557,
554
+ "epoch": 4.744,
555
+ "grad_norm": 0.24027736485004425,
556
+ "kl": 0.10040690104166666,
557
+ "learning_rate": 2.514892518288988e-07,
558
+ "loss": 0.0508,
559
+ "reward": 0.9135416855414709,
560
+ "reward_std": 0.3980190739035606,
561
+ "rewards/accuracy_reward": 0.1682291696468989,
562
+ "rewards/format_reward": 0.7453125208616257,
563
+ "step": 195
564
+ },
565
+ {
566
+ "epoch": 4.864,
567
+ "grad_norm": 0.24247297644615173,
568
+ "learning_rate": 2.0628622566063058e-07,
569
+ "loss": 0.0556,
570
+ "step": 200
571
+ },
572
+ {
573
+ "epoch": 4.864,
574
+ "eval_clip_ratio": 0.0,
575
+ "eval_completion_length": 497.66507427509015,
576
+ "eval_kl": 0.10235126201923077,
577
+ "eval_loss": 0.09290527552366257,
578
+ "eval_reward": 0.8525641239606417,
579
+ "eval_reward_std": 0.4311282199162703,
580
+ "eval_rewards/accuracy_reward": 0.1314102616161108,
581
+ "eval_rewards/format_reward": 0.7211538690787095,
582
+ "eval_runtime": 164.8378,
583
+ "eval_samples_per_second": 0.601,
584
+ "eval_steps_per_second": 0.018,
585
+ "step": 200
586
+ },
587
+ {
588
+ "clip_ratio": 0.0,
589
+ "completion_length": 527.5610371907552,
590
+ "epoch": 4.984,
591
+ "grad_norm": 0.24340181052684784,
592
+ "kl": 0.103265380859375,
593
+ "learning_rate": 1.6509127857277782e-07,
594
+ "loss": 0.0592,
595
+ "reward": 0.9018229390184085,
596
+ "reward_std": 0.38689753947158656,
597
+ "rewards/accuracy_reward": 0.16232639336958526,
598
+ "rewards/format_reward": 0.7394965469837189,
599
+ "step": 205
600
+ },
601
+ {
602
+ "clip_ratio": 0.0,
603
+ "completion_length": 535.6948079427083,
604
+ "epoch": 5.12,
605
+ "grad_norm": 0.2896415889263153,
606
+ "kl": 0.10675455729166666,
607
+ "learning_rate": 1.2811243594045694e-07,
608
+ "loss": 0.059,
609
+ "reward": 0.8942708512147267,
610
+ "reward_std": 0.4014328221480052,
611
+ "rewards/accuracy_reward": 0.15416667043852308,
612
+ "rewards/format_reward": 0.7401041835546494,
613
+ "step": 210
614
+ },
615
+ {
616
+ "clip_ratio": 0.0,
617
+ "completion_length": 525.1632136027018,
618
+ "epoch": 5.24,
619
+ "grad_norm": 0.2585560977458954,
620
+ "kl": 0.10707194010416667,
621
+ "learning_rate": 9.55364327434105e-08,
622
+ "loss": 0.0576,
623
+ "reward": 0.8967014074325561,
624
+ "reward_std": 0.3964859182635943,
625
+ "rewards/accuracy_reward": 0.1552083384245634,
626
+ "rewards/format_reward": 0.7414930721124013,
627
+ "step": 215
628
+ },
629
+ {
630
+ "clip_ratio": 0.0,
631
+ "completion_length": 518.6449793497721,
632
+ "epoch": 5.36,
633
+ "grad_norm": 0.2638151943683624,
634
+ "kl": 0.110107421875,
635
+ "learning_rate": 6.75277705956443e-08,
636
+ "loss": 0.0621,
637
+ "reward": 0.910069465637207,
638
+ "reward_std": 0.38575134972731273,
639
+ "rewards/accuracy_reward": 0.1703125045945247,
640
+ "rewards/format_reward": 0.739756965637207,
641
+ "step": 220
642
+ },
643
+ {
644
+ "clip_ratio": 0.0,
645
+ "completion_length": 519.6583480834961,
646
+ "epoch": 5.48,
647
+ "grad_norm": 0.2671918570995331,
648
+ "kl": 0.10970865885416667,
649
+ "learning_rate": 4.422788704864633e-08,
650
+ "loss": 0.0555,
651
+ "reward": 0.9017361313104629,
652
+ "reward_std": 0.39265564555923144,
653
+ "rewards/accuracy_reward": 0.16128472667187452,
654
+ "rewards/format_reward": 0.7404514094193776,
655
+ "step": 225
656
+ },
657
+ {
658
+ "clip_ratio": 0.0,
659
+ "completion_length": 518.6614771525066,
660
+ "epoch": 5.6,
661
+ "grad_norm": 0.2516481876373291,
662
+ "kl": 0.10393880208333334,
663
+ "learning_rate": 2.575444136302185e-08,
664
+ "loss": 0.0585,
665
+ "reward": 0.9203125298023224,
666
+ "reward_std": 0.3854361062248548,
667
+ "rewards/accuracy_reward": 0.16527778220673403,
668
+ "rewards/format_reward": 0.7550347407658895,
669
+ "step": 230
670
+ },
671
+ {
672
+ "clip_ratio": 0.0,
673
+ "completion_length": 523.0328257242838,
674
+ "epoch": 5.72,
675
+ "grad_norm": 0.24842773377895355,
676
+ "kl": 0.10460611979166666,
677
+ "learning_rate": 1.220072035523989e-08,
678
+ "loss": 0.0615,
679
+ "reward": 0.9104166885217031,
680
+ "reward_std": 0.4020949920018514,
681
+ "rewards/accuracy_reward": 0.16076389284183581,
682
+ "rewards/format_reward": 0.7496527930100759,
683
+ "step": 235
684
+ },
685
+ {
686
+ "clip_ratio": 0.0,
687
+ "completion_length": 548.0658167521159,
688
+ "epoch": 5.84,
689
+ "grad_norm": 0.2960110008716583,
690
+ "kl": 0.10340983072916667,
691
+ "learning_rate": 3.6351673198347087e-09,
692
+ "loss": 0.0572,
693
+ "reward": 0.894097242752711,
694
+ "reward_std": 0.39487800349791846,
695
+ "rewards/accuracy_reward": 0.15312500384946665,
696
+ "rewards/format_reward": 0.740972242752711,
697
+ "step": 240
698
+ },
699
+ {
700
+ "clip_ratio": 0.0,
701
+ "completion_length": 524.3946365356445,
702
+ "epoch": 5.96,
703
+ "grad_norm": 0.28276899456977844,
704
+ "kl": 0.1115234375,
705
+ "learning_rate": 1.0103640590064522e-10,
706
+ "loss": 0.0614,
707
+ "reward": 0.8968750178813935,
708
+ "reward_std": 0.40502374321222306,
709
+ "rewards/accuracy_reward": 0.15798611640930177,
710
+ "rewards/format_reward": 0.7388889054457347,
711
+ "step": 245
712
+ },
713
+ {
714
+ "clip_ratio": 0.0,
715
+ "completion_length": 484.8463541666667,
716
+ "epoch": 5.984,
717
+ "kl": 0.11258951822916667,
718
+ "reward": 0.8923611293236414,
719
+ "reward_std": 0.40955925981203717,
720
+ "rewards/accuracy_reward": 0.1605902835726738,
721
+ "rewards/format_reward": 0.731770858168602,
722
+ "step": 246,
723
+ "total_flos": 0.0,
724
+ "train_loss": 0.03362821891328426,
725
+ "train_runtime": 77437.8148,
726
+ "train_samples_per_second": 0.62,
727
+ "train_steps_per_second": 0.003
728
+ }
729
+ ],
730
+ "logging_steps": 5,
731
+ "max_steps": 246,
732
+ "num_input_tokens_seen": 0,
733
+ "num_train_epochs": 6,
734
+ "save_steps": 500,
735
+ "stateful_callbacks": {
736
+ "TrainerControl": {
737
+ "args": {
738
+ "should_epoch_stop": false,
739
+ "should_evaluate": false,
740
+ "should_log": false,
741
+ "should_save": true,
742
+ "should_training_stop": true
743
+ },
744
+ "attributes": {}
745
+ }
746
+ },
747
+ "total_flos": 0.0,
748
+ "train_batch_size": 12,
749
+ "trial_name": null,
750
+ "trial_params": null
751
+ }