{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.22970024118525326,
  "eval_steps": 50,
  "global_step": 250,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00918800964741013,
      "grad_norm": 0.036612071096897125,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 15.01579761505127,
      "logits/rejected": 15.359031677246094,
      "logps/chosen": -0.2681262791156769,
      "logps/rejected": -0.31947994232177734,
      "loss": 0.9551,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.40218934416770935,
      "rewards/margins": 0.07703053951263428,
      "rewards/rejected": -0.479219913482666,
      "step": 10
    },
    {
      "epoch": 0.01837601929482026,
      "grad_norm": 0.05575725808739662,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 14.570712089538574,
      "logits/rejected": 15.321355819702148,
      "logps/chosen": -0.2867889404296875,
      "logps/rejected": -0.3514837622642517,
      "loss": 0.923,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.43018341064453125,
      "rewards/margins": 0.09704220294952393,
      "rewards/rejected": -0.5272256135940552,
      "step": 20
    },
    {
      "epoch": 0.02756402894223039,
      "grad_norm": 0.0492466576397419,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 14.748420715332031,
      "logits/rejected": 14.969354629516602,
      "logps/chosen": -0.28405922651290894,
      "logps/rejected": -0.32855403423309326,
      "loss": 0.9357,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.426088809967041,
      "rewards/margins": 0.06674225628376007,
      "rewards/rejected": -0.4928310811519623,
      "step": 30
    },
    {
      "epoch": 0.03675203858964052,
      "grad_norm": 0.05719422921538353,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 14.28278923034668,
      "logits/rejected": 14.76964282989502,
      "logps/chosen": -0.27940627932548523,
      "logps/rejected": -0.3408831059932709,
      "loss": 0.9215,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.41910940408706665,
      "rewards/margins": 0.09221524000167847,
      "rewards/rejected": -0.5113246440887451,
      "step": 40
    },
    {
      "epoch": 0.04594004823705065,
      "grad_norm": 0.06247895210981369,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 14.943578720092773,
      "logits/rejected": 14.936178207397461,
      "logps/chosen": -0.2819541394710541,
      "logps/rejected": -0.3245392441749573,
      "loss": 0.9464,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.4229312539100647,
      "rewards/margins": 0.06387762725353241,
      "rewards/rejected": -0.4868088662624359,
      "step": 50
    },
    {
      "epoch": 0.04594004823705065,
      "eval_logits/chosen": 14.7594575881958,
      "eval_logits/rejected": 15.193694114685059,
      "eval_logps/chosen": -0.2807807922363281,
      "eval_logps/rejected": -0.36209535598754883,
      "eval_loss": 0.9397181868553162,
      "eval_rewards/accuracies": 0.5681818127632141,
      "eval_rewards/chosen": -0.4211711883544922,
      "eval_rewards/margins": 0.12197184562683105,
      "eval_rewards/rejected": -0.5431429743766785,
      "eval_runtime": 24.9762,
      "eval_samples_per_second": 28.187,
      "eval_steps_per_second": 3.523,
      "step": 50
    },
    {
      "epoch": 0.05512805788446078,
      "grad_norm": 0.11519577354192734,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": 14.996228218078613,
      "logits/rejected": 15.37781810760498,
      "logps/chosen": -0.2809831202030182,
      "logps/rejected": -0.35486167669296265,
      "loss": 0.9318,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.4214746952056885,
      "rewards/margins": 0.1108178049325943,
      "rewards/rejected": -0.5322924852371216,
      "step": 60
    },
    {
      "epoch": 0.06431606753187091,
      "grad_norm": 0.06691388040781021,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": 14.612454414367676,
      "logits/rejected": 15.678136825561523,
      "logps/chosen": -0.2569667100906372,
      "logps/rejected": -0.40047627687454224,
      "loss": 0.9158,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.3854501247406006,
      "rewards/margins": 0.21526429057121277,
      "rewards/rejected": -0.600714385509491,
      "step": 70
    },
    {
      "epoch": 0.07350407717928104,
      "grad_norm": 0.05976058170199394,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": 14.873895645141602,
      "logits/rejected": 15.50474739074707,
      "logps/chosen": -0.28742527961730957,
      "logps/rejected": -0.37555089592933655,
      "loss": 0.9372,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.43113788962364197,
      "rewards/margins": 0.13218846917152405,
      "rewards/rejected": -0.5633264183998108,
      "step": 80
    },
    {
      "epoch": 0.08269208682669117,
      "grad_norm": 0.0602131113409996,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": 14.356691360473633,
      "logits/rejected": 14.895658493041992,
      "logps/chosen": -0.2613506317138672,
      "logps/rejected": -0.3317110538482666,
      "loss": 0.9324,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.3920259475708008,
      "rewards/margins": 0.10554064810276031,
      "rewards/rejected": -0.4975665509700775,
      "step": 90
    },
    {
      "epoch": 0.0918800964741013,
      "grad_norm": 0.07126503437757492,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": 14.862826347351074,
      "logits/rejected": 15.257089614868164,
      "logps/chosen": -0.2707213759422302,
      "logps/rejected": -0.3511395752429962,
      "loss": 0.9353,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.4060820937156677,
      "rewards/margins": 0.1206272691488266,
      "rewards/rejected": -0.5267094373703003,
      "step": 100
    },
    {
      "epoch": 0.0918800964741013,
      "eval_logits/chosen": 14.664334297180176,
      "eval_logits/rejected": 15.113536834716797,
      "eval_logps/chosen": -0.2750833034515381,
      "eval_logps/rejected": -0.36540210247039795,
      "eval_loss": 0.9324077367782593,
      "eval_rewards/accuracies": 0.5795454382896423,
      "eval_rewards/chosen": -0.41262495517730713,
      "eval_rewards/margins": 0.1354781985282898,
      "eval_rewards/rejected": -0.5481031537055969,
      "eval_runtime": 24.4286,
      "eval_samples_per_second": 28.819,
      "eval_steps_per_second": 3.602,
      "step": 100
    },
    {
      "epoch": 0.10106810612151143,
      "grad_norm": 0.07136944681406021,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": 14.942098617553711,
      "logits/rejected": 15.138586044311523,
      "logps/chosen": -0.2860812246799469,
      "logps/rejected": -0.36259371042251587,
      "loss": 0.934,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.42912182211875916,
      "rewards/margins": 0.11476878076791763,
      "rewards/rejected": -0.5438905954360962,
      "step": 110
    },
    {
      "epoch": 0.11025611576892155,
      "grad_norm": 0.07038908451795578,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": 14.488851547241211,
      "logits/rejected": 14.702054023742676,
      "logps/chosen": -0.2662215232849121,
      "logps/rejected": -0.3013685941696167,
      "loss": 0.9202,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.39933228492736816,
      "rewards/margins": 0.05272058770060539,
      "rewards/rejected": -0.45205289125442505,
      "step": 120
    },
    {
      "epoch": 0.11944412541633169,
      "grad_norm": 0.06875801086425781,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": 14.075657844543457,
      "logits/rejected": 14.696513175964355,
      "logps/chosen": -0.250360369682312,
      "logps/rejected": -0.3504650592803955,
      "loss": 0.9266,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.375540554523468,
      "rewards/margins": 0.15015706419944763,
      "rewards/rejected": -0.5256975889205933,
      "step": 130
    },
    {
      "epoch": 0.12863213506374183,
      "grad_norm": 0.0984601378440857,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": 13.738212585449219,
      "logits/rejected": 14.311574935913086,
      "logps/chosen": -0.26711025834083557,
      "logps/rejected": -0.3587702810764313,
      "loss": 0.9185,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.40066537261009216,
      "rewards/margins": 0.13749003410339355,
      "rewards/rejected": -0.5381554365158081,
      "step": 140
    },
    {
      "epoch": 0.13782014471115195,
      "grad_norm": 0.10201425850391388,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": 13.7462797164917,
      "logits/rejected": 14.230626106262207,
      "logps/chosen": -0.25559619069099426,
      "logps/rejected": -0.3708702623844147,
      "loss": 0.9106,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.3833943009376526,
      "rewards/margins": 0.17291104793548584,
      "rewards/rejected": -0.5563054084777832,
      "step": 150
    },
    {
      "epoch": 0.13782014471115195,
      "eval_logits/chosen": 13.458538055419922,
      "eval_logits/rejected": 13.998083114624023,
      "eval_logps/chosen": -0.2759075462818146,
      "eval_logps/rejected": -0.3873325288295746,
      "eval_loss": 0.9164085388183594,
      "eval_rewards/accuracies": 0.5795454382896423,
      "eval_rewards/chosen": -0.41386130452156067,
      "eval_rewards/margins": 0.1671374887228012,
      "eval_rewards/rejected": -0.5809988379478455,
      "eval_runtime": 24.4393,
      "eval_samples_per_second": 28.806,
      "eval_steps_per_second": 3.601,
      "step": 150
    },
    {
      "epoch": 0.14700815435856207,
      "grad_norm": 0.11537656933069229,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": 12.686149597167969,
      "logits/rejected": 13.478736877441406,
      "logps/chosen": -0.23941929638385773,
      "logps/rejected": -0.3713286519050598,
      "loss": 0.9094,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.3591288924217224,
      "rewards/margins": 0.1978640854358673,
      "rewards/rejected": -0.5569929480552673,
      "step": 160
    },
    {
      "epoch": 0.1561961640059722,
      "grad_norm": 0.1196313351392746,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": 13.221656799316406,
      "logits/rejected": 13.317082405090332,
      "logps/chosen": -0.3033878207206726,
      "logps/rejected": -0.3784424960613251,
      "loss": 0.9057,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.4550817608833313,
      "rewards/margins": 0.11258199065923691,
      "rewards/rejected": -0.5676637887954712,
      "step": 170
    },
    {
      "epoch": 0.16538417365338234,
      "grad_norm": 0.18745549023151398,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": 11.797627449035645,
      "logits/rejected": 12.031414985656738,
      "logps/chosen": -0.2746419608592987,
      "logps/rejected": -0.3629845976829529,
      "loss": 0.8954,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.41196292638778687,
      "rewards/margins": 0.13251398503780365,
      "rewards/rejected": -0.5444768667221069,
      "step": 180
    },
    {
      "epoch": 0.17457218330079247,
      "grad_norm": 0.1806156188249588,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": 10.275301933288574,
      "logits/rejected": 10.937273025512695,
      "logps/chosen": -0.2880379557609558,
      "logps/rejected": -0.4154580533504486,
      "loss": 0.8875,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.43205690383911133,
      "rewards/margins": 0.19113019108772278,
      "rewards/rejected": -0.6231871247291565,
      "step": 190
    },
    {
      "epoch": 0.1837601929482026,
      "grad_norm": 0.1839464157819748,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": 10.020039558410645,
      "logits/rejected": 10.66059398651123,
      "logps/chosen": -0.3136019706726074,
      "logps/rejected": -0.4385503828525543,
      "loss": 0.8647,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.47040295600891113,
      "rewards/margins": 0.18742261826992035,
      "rewards/rejected": -0.6578255891799927,
      "step": 200
    },
    {
      "epoch": 0.1837601929482026,
      "eval_logits/chosen": 9.442557334899902,
      "eval_logits/rejected": 10.053345680236816,
      "eval_logps/chosen": -0.3080674409866333,
      "eval_logps/rejected": -0.4899139702320099,
      "eval_loss": 0.8702690005302429,
      "eval_rewards/accuracies": 0.6931818127632141,
      "eval_rewards/chosen": -0.46210116147994995,
      "eval_rewards/margins": 0.27276986837387085,
      "eval_rewards/rejected": -0.7348710894584656,
      "eval_runtime": 24.4185,
      "eval_samples_per_second": 28.831,
      "eval_steps_per_second": 3.604,
      "step": 200
    },
    {
      "epoch": 0.19294820259561274,
      "grad_norm": 0.269613116979599,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": 7.941342353820801,
      "logits/rejected": 8.542920112609863,
      "logps/chosen": -0.3083941638469696,
      "logps/rejected": -0.5024437308311462,
      "loss": 0.8471,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.4625912606716156,
      "rewards/margins": 0.29107433557510376,
      "rewards/rejected": -0.7536656856536865,
      "step": 210
    },
    {
      "epoch": 0.20213621224302286,
      "grad_norm": 0.2640094459056854,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": 7.587499141693115,
      "logits/rejected": 7.592519283294678,
      "logps/chosen": -0.3381899893283844,
      "logps/rejected": -0.48494213819503784,
      "loss": 0.8427,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.5072849988937378,
      "rewards/margins": 0.22012826800346375,
      "rewards/rejected": -0.7274132966995239,
      "step": 220
    },
    {
      "epoch": 0.21132422189043298,
      "grad_norm": 0.29708293080329895,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": 6.250656604766846,
      "logits/rejected": 6.7652716636657715,
      "logps/chosen": -0.3644888997077942,
      "logps/rejected": -0.5470594167709351,
      "loss": 0.8201,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.5467333793640137,
      "rewards/margins": 0.2738557755947113,
      "rewards/rejected": -0.8205891847610474,
      "step": 230
    },
    {
      "epoch": 0.2205122315378431,
      "grad_norm": 0.35299497842788696,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": 4.6331706047058105,
      "logits/rejected": 4.710076332092285,
      "logps/chosen": -0.3634452223777771,
      "logps/rejected": -0.7193974256515503,
      "loss": 0.7877,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.545167863368988,
      "rewards/margins": 0.5339283347129822,
      "rewards/rejected": -1.0790963172912598,
      "step": 240
    },
    {
      "epoch": 0.22970024118525326,
      "grad_norm": 0.4265730082988739,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": 4.992984771728516,
      "logits/rejected": 4.606354713439941,
      "logps/chosen": -0.413116455078125,
      "logps/rejected": -0.7104976177215576,
      "loss": 0.7902,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.6196746826171875,
      "rewards/margins": 0.4460717737674713,
      "rewards/rejected": -1.0657463073730469,
      "step": 250
    },
    {
      "epoch": 0.22970024118525326,
      "eval_logits/chosen": 4.127804279327393,
      "eval_logits/rejected": 3.742251396179199,
      "eval_logps/chosen": -0.420327365398407,
      "eval_logps/rejected": -0.7902651429176331,
      "eval_loss": 0.7682384252548218,
      "eval_rewards/accuracies": 0.7159090638160706,
      "eval_rewards/chosen": -0.6304910182952881,
      "eval_rewards/margins": 0.5549066662788391,
      "eval_rewards/rejected": -1.185397744178772,
      "eval_runtime": 24.4318,
      "eval_samples_per_second": 28.815,
      "eval_steps_per_second": 3.602,
      "step": 250
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.082014861863158e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}