{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.2664298401420959,
  "eval_steps": 50,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008880994671403197,
      "grad_norm": 0.04571289196610451,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 14.56671142578125,
      "logits/rejected": 15.112574577331543,
      "logps/chosen": -0.26506316661834717,
      "logps/rejected": -0.3439488410949707,
      "loss": 0.9267,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.39759472012519836,
      "rewards/margins": 0.11832849681377411,
      "rewards/rejected": -0.5159232020378113,
      "step": 10
    },
    {
      "epoch": 0.017761989342806393,
      "grad_norm": 0.0512714721262455,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 14.376543045043945,
      "logits/rejected": 14.862703323364258,
      "logps/chosen": -0.2708089351654053,
      "logps/rejected": -0.32412824034690857,
      "loss": 0.936,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.4062133729457855,
      "rewards/margins": 0.07997899502515793,
      "rewards/rejected": -0.4861923158168793,
      "step": 20
    },
    {
      "epoch": 0.02664298401420959,
      "grad_norm": 0.058383647352457047,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 14.208717346191406,
      "logits/rejected": 15.370651245117188,
      "logps/chosen": -0.28206294775009155,
      "logps/rejected": -0.38387423753738403,
      "loss": 0.9215,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.42309442162513733,
      "rewards/margins": 0.15271687507629395,
      "rewards/rejected": -0.5758112668991089,
      "step": 30
    },
    {
      "epoch": 0.035523978685612786,
      "grad_norm": 0.06262075155973434,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 14.768765449523926,
      "logits/rejected": 15.169331550598145,
      "logps/chosen": -0.27857059240341187,
      "logps/rejected": -0.3388269543647766,
      "loss": 0.9386,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.4178559184074402,
      "rewards/margins": 0.09038447588682175,
      "rewards/rejected": -0.5082404017448425,
      "step": 40
    },
    {
      "epoch": 0.04440497335701599,
      "grad_norm": 0.06259036809206009,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 14.950456619262695,
      "logits/rejected": 15.232122421264648,
      "logps/chosen": -0.2961367070674896,
      "logps/rejected": -0.3322262465953827,
      "loss": 0.9317,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.44420504570007324,
      "rewards/margins": 0.054134320467710495,
      "rewards/rejected": -0.4983394145965576,
      "step": 50
    },
    {
      "epoch": 0.04440497335701599,
      "eval_logits/chosen": 14.56529426574707,
      "eval_logits/rejected": 14.895020484924316,
      "eval_logps/chosen": -0.2806546986103058,
      "eval_logps/rejected": -0.3486972451210022,
      "eval_loss": 0.9381324052810669,
      "eval_rewards/accuracies": 0.5274725556373596,
      "eval_rewards/chosen": -0.4209820330142975,
      "eval_rewards/margins": 0.10206379741430283,
      "eval_rewards/rejected": -0.5230458974838257,
      "eval_runtime": 25.2574,
      "eval_samples_per_second": 28.823,
      "eval_steps_per_second": 3.603,
      "step": 50
    },
    {
      "epoch": 0.05328596802841918,
      "grad_norm": 0.07301533967256546,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": 14.195574760437012,
      "logits/rejected": 15.173194885253906,
      "logps/chosen": -0.2693648636341095,
      "logps/rejected": -0.33997970819473267,
      "loss": 0.9319,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.40404725074768066,
      "rewards/margins": 0.10592226684093475,
      "rewards/rejected": -0.5099694728851318,
      "step": 60
    },
    {
      "epoch": 0.06216696269982238,
      "grad_norm": 0.0659889206290245,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": 14.910173416137695,
      "logits/rejected": 15.361429214477539,
      "logps/chosen": -0.28456225991249084,
      "logps/rejected": -0.3702812194824219,
      "loss": 0.9185,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.42684346437454224,
      "rewards/margins": 0.12857840955257416,
      "rewards/rejected": -0.5554218292236328,
      "step": 70
    },
    {
      "epoch": 0.07104795737122557,
      "grad_norm": 0.05815625935792923,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": 14.407182693481445,
      "logits/rejected": 14.948204040527344,
      "logps/chosen": -0.292889267206192,
      "logps/rejected": -0.3381648063659668,
      "loss": 0.9388,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.43933385610580444,
      "rewards/margins": 0.06791339069604874,
      "rewards/rejected": -0.5072472095489502,
      "step": 80
    },
    {
      "epoch": 0.07992895204262877,
      "grad_norm": 0.06627190113067627,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": 14.622471809387207,
      "logits/rejected": 15.167770385742188,
      "logps/chosen": -0.28155821561813354,
      "logps/rejected": -0.33633899688720703,
      "loss": 0.9256,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.4223373532295227,
      "rewards/margins": 0.08217118680477142,
      "rewards/rejected": -0.5045084953308105,
      "step": 90
    },
    {
      "epoch": 0.08880994671403197,
      "grad_norm": 0.0724545568227768,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": 14.289724349975586,
      "logits/rejected": 14.882037162780762,
      "logps/chosen": -0.2791440486907959,
      "logps/rejected": -0.35329627990722656,
      "loss": 0.9374,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.41871610283851624,
      "rewards/margins": 0.11122839152812958,
      "rewards/rejected": -0.5299445390701294,
      "step": 100
    },
    {
      "epoch": 0.08880994671403197,
      "eval_logits/chosen": 14.337930679321289,
      "eval_logits/rejected": 14.689269065856934,
      "eval_logps/chosen": -0.2726942300796509,
      "eval_logps/rejected": -0.34668418765068054,
      "eval_loss": 0.9302808046340942,
      "eval_rewards/accuracies": 0.5384615659713745,
      "eval_rewards/chosen": -0.40904131531715393,
      "eval_rewards/margins": 0.11098497360944748,
      "eval_rewards/rejected": -0.5200263261795044,
      "eval_runtime": 25.2585,
      "eval_samples_per_second": 28.822,
      "eval_steps_per_second": 3.603,
      "step": 100
    },
    {
      "epoch": 0.09769094138543517,
      "grad_norm": 0.08156246691942215,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": 14.499124526977539,
      "logits/rejected": 14.916313171386719,
      "logps/chosen": -0.2798352837562561,
      "logps/rejected": -0.3477734327316284,
      "loss": 0.9243,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.4197530150413513,
      "rewards/margins": 0.10190720856189728,
      "rewards/rejected": -0.5216602087020874,
      "step": 110
    },
    {
      "epoch": 0.10657193605683836,
      "grad_norm": 0.08161844313144684,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": 13.595013618469238,
      "logits/rejected": 14.390353202819824,
      "logps/chosen": -0.26682502031326294,
      "logps/rejected": -0.3336995542049408,
      "loss": 0.9123,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.400237500667572,
      "rewards/margins": 0.10031183809041977,
      "rewards/rejected": -0.5005493760108948,
      "step": 120
    },
    {
      "epoch": 0.11545293072824156,
      "grad_norm": 0.28624778985977173,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": 13.711044311523438,
      "logits/rejected": 14.558542251586914,
      "logps/chosen": -0.27874043583869934,
      "logps/rejected": -0.3582325279712677,
      "loss": 0.9163,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.41811060905456543,
      "rewards/margins": 0.11923813819885254,
      "rewards/rejected": -0.537348747253418,
      "step": 130
    },
    {
      "epoch": 0.12433392539964476,
      "grad_norm": 0.10971464216709137,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": 14.18798828125,
      "logits/rejected": 14.993026733398438,
      "logps/chosen": -0.2750400900840759,
      "logps/rejected": -0.39451608061790466,
      "loss": 0.9098,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.4125601351261139,
      "rewards/margins": 0.17921395599842072,
      "rewards/rejected": -0.5917741060256958,
      "step": 140
    },
    {
      "epoch": 0.13321492007104796,
      "grad_norm": 0.09321591258049011,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": 12.775139808654785,
      "logits/rejected": 13.751996994018555,
      "logps/chosen": -0.28446996212005615,
      "logps/rejected": -0.36404967308044434,
      "loss": 0.9104,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.42670494318008423,
      "rewards/margins": 0.11936960369348526,
      "rewards/rejected": -0.5460745096206665,
      "step": 150
    },
    {
      "epoch": 0.13321492007104796,
      "eval_logits/chosen": 12.97266960144043,
      "eval_logits/rejected": 13.47339916229248,
      "eval_logps/chosen": -0.27297571301460266,
      "eval_logps/rejected": -0.36854612827301025,
      "eval_loss": 0.9143257737159729,
      "eval_rewards/accuracies": 0.5824176073074341,
      "eval_rewards/chosen": -0.4094635546207428,
      "eval_rewards/margins": 0.14335563778877258,
      "eval_rewards/rejected": -0.5528191924095154,
      "eval_runtime": 25.2406,
      "eval_samples_per_second": 28.842,
      "eval_steps_per_second": 3.605,
      "step": 150
    },
    {
      "epoch": 0.14209591474245115,
      "grad_norm": 0.11029861867427826,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": 12.677947998046875,
      "logits/rejected": 13.396716117858887,
      "logps/chosen": -0.2631794512271881,
      "logps/rejected": -0.37102141976356506,
      "loss": 0.9051,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.3947691321372986,
      "rewards/margins": 0.161762997508049,
      "rewards/rejected": -0.5565321445465088,
      "step": 160
    },
    {
      "epoch": 0.15097690941385436,
      "grad_norm": 0.15728294849395752,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": 12.620219230651855,
      "logits/rejected": 13.189640998840332,
      "logps/chosen": -0.2947639524936676,
      "logps/rejected": -0.3843482732772827,
      "loss": 0.8906,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.442145973443985,
      "rewards/margins": 0.13437646627426147,
      "rewards/rejected": -0.5765224099159241,
      "step": 170
    },
    {
      "epoch": 0.15985790408525755,
      "grad_norm": 0.31504154205322266,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": 11.487619400024414,
      "logits/rejected": 12.33470344543457,
      "logps/chosen": -0.271095871925354,
      "logps/rejected": -0.4252637028694153,
      "loss": 0.8766,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.406643807888031,
      "rewards/margins": 0.2312517911195755,
      "rewards/rejected": -0.6378955245018005,
      "step": 180
    },
    {
      "epoch": 0.16873889875666073,
      "grad_norm": 0.19222252070903778,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": 10.093737602233887,
      "logits/rejected": 10.851752281188965,
      "logps/chosen": -0.2679918110370636,
      "logps/rejected": -0.437336266040802,
      "loss": 0.884,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.4019877314567566,
      "rewards/margins": 0.2540166974067688,
      "rewards/rejected": -0.6560044288635254,
      "step": 190
    },
    {
      "epoch": 0.17761989342806395,
      "grad_norm": 0.2275688648223877,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": 9.483477592468262,
      "logits/rejected": 10.106366157531738,
      "logps/chosen": -0.2957404553890228,
      "logps/rejected": -0.40739065408706665,
      "loss": 0.8767,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.4436107575893402,
      "rewards/margins": 0.16747523844242096,
      "rewards/rejected": -0.6110859513282776,
      "step": 200
    },
    {
      "epoch": 0.17761989342806395,
      "eval_logits/chosen": 8.491498947143555,
      "eval_logits/rejected": 8.999146461486816,
      "eval_logps/chosen": -0.3135836124420166,
      "eval_logps/rejected": -0.4829566180706024,
      "eval_loss": 0.8664290904998779,
      "eval_rewards/accuracies": 0.6263736486434937,
      "eval_rewards/chosen": -0.4703753888607025,
      "eval_rewards/margins": 0.2540595233440399,
      "eval_rewards/rejected": -0.7244349122047424,
      "eval_runtime": 25.2553,
      "eval_samples_per_second": 28.826,
      "eval_steps_per_second": 3.603,
      "step": 200
    },
    {
      "epoch": 0.18650088809946713,
      "grad_norm": 0.27885496616363525,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": 7.234966278076172,
      "logits/rejected": 8.313450813293457,
      "logps/chosen": -0.29102542996406555,
      "logps/rejected": -0.49241799116134644,
      "loss": 0.8556,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.43653813004493713,
      "rewards/margins": 0.3020888566970825,
      "rewards/rejected": -0.7386269569396973,
      "step": 210
    },
    {
      "epoch": 0.19538188277087035,
      "grad_norm": 0.29907363653182983,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": 7.907521724700928,
      "logits/rejected": 8.253190994262695,
      "logps/chosen": -0.33691853284835815,
      "logps/rejected": -0.4829257130622864,
      "loss": 0.8236,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.5053777694702148,
      "rewards/margins": 0.21901080012321472,
      "rewards/rejected": -0.724388599395752,
      "step": 220
    },
    {
      "epoch": 0.20426287744227353,
      "grad_norm": 0.282474547624588,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": 6.367492198944092,
      "logits/rejected": 6.273728370666504,
      "logps/chosen": -0.3519875705242157,
      "logps/rejected": -0.5284813642501831,
      "loss": 0.8027,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.5279813408851624,
      "rewards/margins": 0.2647407650947571,
      "rewards/rejected": -0.7927221059799194,
      "step": 230
    },
    {
      "epoch": 0.21314387211367672,
      "grad_norm": 0.327765554189682,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": 5.090893268585205,
      "logits/rejected": 4.768380165100098,
      "logps/chosen": -0.3851698040962219,
      "logps/rejected": -0.6464222073554993,
      "loss": 0.7898,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.5777546167373657,
      "rewards/margins": 0.391878604888916,
      "rewards/rejected": -0.9696332812309265,
      "step": 240
    },
    {
      "epoch": 0.22202486678507993,
      "grad_norm": 0.4895865321159363,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": 4.056812286376953,
      "logits/rejected": 3.723601818084717,
      "logps/chosen": -0.4400455951690674,
      "logps/rejected": -0.7731422781944275,
      "loss": 0.7626,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.6600683927536011,
      "rewards/margins": 0.49964505434036255,
      "rewards/rejected": -1.1597135066986084,
      "step": 250
    },
    {
      "epoch": 0.22202486678507993,
      "eval_logits/chosen": 2.420060396194458,
      "eval_logits/rejected": 2.1626052856445312,
      "eval_logps/chosen": -0.4724067151546478,
      "eval_logps/rejected": -0.8418064117431641,
      "eval_loss": 0.7631083130836487,
      "eval_rewards/accuracies": 0.6483516693115234,
      "eval_rewards/chosen": -0.7086100578308105,
      "eval_rewards/margins": 0.5540997385978699,
      "eval_rewards/rejected": -1.2627097368240356,
      "eval_runtime": 25.2418,
      "eval_samples_per_second": 28.841,
      "eval_steps_per_second": 3.605,
      "step": 250
    },
    {
      "epoch": 0.23090586145648312,
      "grad_norm": 0.46291017532348633,
      "learning_rate": 4.638410650401267e-06,
      "logits/chosen": 1.5297390222549438,
      "logits/rejected": 1.1381648778915405,
      "logps/chosen": -0.4418027997016907,
      "logps/rejected": -1.0542564392089844,
      "loss": 0.7026,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.6627041697502136,
      "rewards/margins": 0.9186803698539734,
      "rewards/rejected": -1.5813844203948975,
      "step": 260
    },
    {
      "epoch": 0.23978685612788633,
      "grad_norm": 0.9783313870429993,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": 2.8311033248901367,
      "logits/rejected": 1.9742711782455444,
      "logps/chosen": -0.5430587530136108,
      "logps/rejected": -0.9841039776802063,
      "loss": 0.7317,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.814588189125061,
      "rewards/margins": 0.6615679860115051,
      "rewards/rejected": -1.4761559963226318,
      "step": 270
    },
    {
      "epoch": 0.24866785079928952,
      "grad_norm": 2.102562189102173,
      "learning_rate": 4.582303101775249e-06,
      "logits/chosen": 1.8241952657699585,
      "logits/rejected": 0.8777934312820435,
      "logps/chosen": -0.5624039769172668,
      "logps/rejected": -1.1460126638412476,
      "loss": 0.6887,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.8436058163642883,
      "rewards/margins": 0.8754131197929382,
      "rewards/rejected": -1.7190189361572266,
      "step": 280
    },
    {
      "epoch": 0.25754884547069273,
      "grad_norm": 0.9813026189804077,
      "learning_rate": 4.55287302283426e-06,
      "logits/chosen": 2.370732069015503,
      "logits/rejected": 1.4697134494781494,
      "logps/chosen": -0.6739786863327026,
      "logps/rejected": -1.6581566333770752,
      "loss": 0.5695,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.0109679698944092,
      "rewards/margins": 1.476266622543335,
      "rewards/rejected": -2.487234592437744,
      "step": 290
    },
    {
      "epoch": 0.2664298401420959,
      "grad_norm": 2.187314510345459,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": 1.6230781078338623,
      "logits/rejected": 0.5460122227668762,
      "logps/chosen": -0.6433733701705933,
      "logps/rejected": -2.1001811027526855,
      "loss": 0.5366,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.9650601148605347,
      "rewards/margins": 2.185211658477783,
      "rewards/rejected": -3.1502718925476074,
      "step": 300
    },
    {
      "epoch": 0.2664298401420959,
      "eval_logits/chosen": 1.4087599515914917,
      "eval_logits/rejected": 0.7888947129249573,
      "eval_logps/chosen": -0.7579545974731445,
      "eval_logps/rejected": -2.0049116611480713,
      "eval_loss": 0.551510214805603,
      "eval_rewards/accuracies": 0.6813187003135681,
      "eval_rewards/chosen": -1.1369318962097168,
      "eval_rewards/margins": 1.8704355955123901,
      "eval_rewards/rejected": -3.0073673725128174,
      "eval_runtime": 25.2647,
      "eval_samples_per_second": 28.815,
      "eval_steps_per_second": 3.602,
      "step": 300
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.085793279224054e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}