{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.44404973357015987,
  "eval_steps": 50,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008880994671403197,
      "grad_norm": 0.04571289196610451,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 14.56671142578125,
      "logits/rejected": 15.112574577331543,
      "logps/chosen": -0.26506316661834717,
      "logps/rejected": -0.3439488410949707,
      "loss": 0.9267,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.39759472012519836,
      "rewards/margins": 0.11832849681377411,
      "rewards/rejected": -0.5159232020378113,
      "step": 10
    },
    {
      "epoch": 0.017761989342806393,
      "grad_norm": 0.0512714721262455,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 14.376543045043945,
      "logits/rejected": 14.862703323364258,
      "logps/chosen": -0.2708089351654053,
      "logps/rejected": -0.32412824034690857,
      "loss": 0.936,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.4062133729457855,
      "rewards/margins": 0.07997899502515793,
      "rewards/rejected": -0.4861923158168793,
      "step": 20
    },
    {
      "epoch": 0.02664298401420959,
      "grad_norm": 0.058383647352457047,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 14.208717346191406,
      "logits/rejected": 15.370651245117188,
      "logps/chosen": -0.28206294775009155,
      "logps/rejected": -0.38387423753738403,
      "loss": 0.9215,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.42309442162513733,
      "rewards/margins": 0.15271687507629395,
      "rewards/rejected": -0.5758112668991089,
      "step": 30
    },
    {
      "epoch": 0.035523978685612786,
      "grad_norm": 0.06262075155973434,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 14.768765449523926,
      "logits/rejected": 15.169331550598145,
      "logps/chosen": -0.27857059240341187,
      "logps/rejected": -0.3388269543647766,
      "loss": 0.9386,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.4178559184074402,
      "rewards/margins": 0.09038447588682175,
      "rewards/rejected": -0.5082404017448425,
      "step": 40
    },
    {
      "epoch": 0.04440497335701599,
      "grad_norm": 0.06259036809206009,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 14.950456619262695,
      "logits/rejected": 15.232122421264648,
      "logps/chosen": -0.2961367070674896,
      "logps/rejected": -0.3322262465953827,
      "loss": 0.9317,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.44420504570007324,
      "rewards/margins": 0.054134320467710495,
      "rewards/rejected": -0.4983394145965576,
      "step": 50
    },
    {
      "epoch": 0.04440497335701599,
      "eval_logits/chosen": 14.56529426574707,
      "eval_logits/rejected": 14.895020484924316,
      "eval_logps/chosen": -0.2806546986103058,
      "eval_logps/rejected": -0.3486972451210022,
      "eval_loss": 0.9381324052810669,
      "eval_rewards/accuracies": 0.5274725556373596,
      "eval_rewards/chosen": -0.4209820330142975,
      "eval_rewards/margins": 0.10206379741430283,
      "eval_rewards/rejected": -0.5230458974838257,
      "eval_runtime": 25.2574,
      "eval_samples_per_second": 28.823,
      "eval_steps_per_second": 3.603,
      "step": 50
    },
    {
      "epoch": 0.05328596802841918,
      "grad_norm": 0.07301533967256546,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": 14.195574760437012,
      "logits/rejected": 15.173194885253906,
      "logps/chosen": -0.2693648636341095,
      "logps/rejected": -0.33997970819473267,
      "loss": 0.9319,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.40404725074768066,
      "rewards/margins": 0.10592226684093475,
      "rewards/rejected": -0.5099694728851318,
      "step": 60
    },
    {
      "epoch": 0.06216696269982238,
      "grad_norm": 0.0659889206290245,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": 14.910173416137695,
      "logits/rejected": 15.361429214477539,
      "logps/chosen": -0.28456225991249084,
      "logps/rejected": -0.3702812194824219,
      "loss": 0.9185,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.42684346437454224,
      "rewards/margins": 0.12857840955257416,
      "rewards/rejected": -0.5554218292236328,
      "step": 70
    },
    {
      "epoch": 0.07104795737122557,
      "grad_norm": 0.05815625935792923,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": 14.407182693481445,
      "logits/rejected": 14.948204040527344,
      "logps/chosen": -0.292889267206192,
      "logps/rejected": -0.3381648063659668,
      "loss": 0.9388,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.43933385610580444,
      "rewards/margins": 0.06791339069604874,
      "rewards/rejected": -0.5072472095489502,
      "step": 80
    },
    {
      "epoch": 0.07992895204262877,
      "grad_norm": 0.06627190113067627,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": 14.622471809387207,
      "logits/rejected": 15.167770385742188,
      "logps/chosen": -0.28155821561813354,
      "logps/rejected": -0.33633899688720703,
      "loss": 0.9256,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.4223373532295227,
      "rewards/margins": 0.08217118680477142,
      "rewards/rejected": -0.5045084953308105,
      "step": 90
    },
    {
      "epoch": 0.08880994671403197,
      "grad_norm": 0.0724545568227768,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": 14.289724349975586,
      "logits/rejected": 14.882037162780762,
      "logps/chosen": -0.2791440486907959,
      "logps/rejected": -0.35329627990722656,
      "loss": 0.9374,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.41871610283851624,
      "rewards/margins": 0.11122839152812958,
      "rewards/rejected": -0.5299445390701294,
      "step": 100
    },
    {
      "epoch": 0.08880994671403197,
      "eval_logits/chosen": 14.337930679321289,
      "eval_logits/rejected": 14.689269065856934,
      "eval_logps/chosen": -0.2726942300796509,
      "eval_logps/rejected": -0.34668418765068054,
      "eval_loss": 0.9302808046340942,
      "eval_rewards/accuracies": 0.5384615659713745,
      "eval_rewards/chosen": -0.40904131531715393,
      "eval_rewards/margins": 0.11098497360944748,
      "eval_rewards/rejected": -0.5200263261795044,
      "eval_runtime": 25.2585,
      "eval_samples_per_second": 28.822,
      "eval_steps_per_second": 3.603,
      "step": 100
    },
    {
      "epoch": 0.09769094138543517,
      "grad_norm": 0.08156246691942215,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": 14.499124526977539,
      "logits/rejected": 14.916313171386719,
      "logps/chosen": -0.2798352837562561,
      "logps/rejected": -0.3477734327316284,
      "loss": 0.9243,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.4197530150413513,
      "rewards/margins": 0.10190720856189728,
      "rewards/rejected": -0.5216602087020874,
      "step": 110
    },
    {
      "epoch": 0.10657193605683836,
      "grad_norm": 0.08161844313144684,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": 13.595013618469238,
      "logits/rejected": 14.390353202819824,
      "logps/chosen": -0.26682502031326294,
      "logps/rejected": -0.3336995542049408,
      "loss": 0.9123,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.400237500667572,
      "rewards/margins": 0.10031183809041977,
      "rewards/rejected": -0.5005493760108948,
      "step": 120
    },
    {
      "epoch": 0.11545293072824156,
      "grad_norm": 0.28624778985977173,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": 13.711044311523438,
      "logits/rejected": 14.558542251586914,
      "logps/chosen": -0.27874043583869934,
      "logps/rejected": -0.3582325279712677,
      "loss": 0.9163,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.41811060905456543,
      "rewards/margins": 0.11923813819885254,
      "rewards/rejected": -0.537348747253418,
      "step": 130
    },
    {
      "epoch": 0.12433392539964476,
      "grad_norm": 0.10971464216709137,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": 14.18798828125,
      "logits/rejected": 14.993026733398438,
      "logps/chosen": -0.2750400900840759,
      "logps/rejected": -0.39451608061790466,
      "loss": 0.9098,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.4125601351261139,
      "rewards/margins": 0.17921395599842072,
      "rewards/rejected": -0.5917741060256958,
      "step": 140
    },
    {
      "epoch": 0.13321492007104796,
      "grad_norm": 0.09321591258049011,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": 12.775139808654785,
      "logits/rejected": 13.751996994018555,
      "logps/chosen": -0.28446996212005615,
      "logps/rejected": -0.36404967308044434,
      "loss": 0.9104,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.42670494318008423,
      "rewards/margins": 0.11936960369348526,
      "rewards/rejected": -0.5460745096206665,
      "step": 150
    },
    {
      "epoch": 0.13321492007104796,
      "eval_logits/chosen": 12.97266960144043,
      "eval_logits/rejected": 13.47339916229248,
      "eval_logps/chosen": -0.27297571301460266,
      "eval_logps/rejected": -0.36854612827301025,
      "eval_loss": 0.9143257737159729,
      "eval_rewards/accuracies": 0.5824176073074341,
      "eval_rewards/chosen": -0.4094635546207428,
      "eval_rewards/margins": 0.14335563778877258,
      "eval_rewards/rejected": -0.5528191924095154,
      "eval_runtime": 25.2406,
      "eval_samples_per_second": 28.842,
      "eval_steps_per_second": 3.605,
      "step": 150
    },
    {
      "epoch": 0.14209591474245115,
      "grad_norm": 0.11029861867427826,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": 12.677947998046875,
      "logits/rejected": 13.396716117858887,
      "logps/chosen": -0.2631794512271881,
      "logps/rejected": -0.37102141976356506,
      "loss": 0.9051,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.3947691321372986,
      "rewards/margins": 0.161762997508049,
      "rewards/rejected": -0.5565321445465088,
      "step": 160
    },
    {
      "epoch": 0.15097690941385436,
      "grad_norm": 0.15728294849395752,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": 12.620219230651855,
      "logits/rejected": 13.189640998840332,
      "logps/chosen": -0.2947639524936676,
      "logps/rejected": -0.3843482732772827,
      "loss": 0.8906,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.442145973443985,
      "rewards/margins": 0.13437646627426147,
      "rewards/rejected": -0.5765224099159241,
      "step": 170
    },
    {
      "epoch": 0.15985790408525755,
      "grad_norm": 0.31504154205322266,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": 11.487619400024414,
      "logits/rejected": 12.33470344543457,
      "logps/chosen": -0.271095871925354,
      "logps/rejected": -0.4252637028694153,
      "loss": 0.8766,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.406643807888031,
      "rewards/margins": 0.2312517911195755,
      "rewards/rejected": -0.6378955245018005,
      "step": 180
    },
    {
      "epoch": 0.16873889875666073,
      "grad_norm": 0.19222252070903778,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": 10.093737602233887,
      "logits/rejected": 10.851752281188965,
      "logps/chosen": -0.2679918110370636,
      "logps/rejected": -0.437336266040802,
      "loss": 0.884,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.4019877314567566,
      "rewards/margins": 0.2540166974067688,
      "rewards/rejected": -0.6560044288635254,
      "step": 190
    },
    {
      "epoch": 0.17761989342806395,
      "grad_norm": 0.2275688648223877,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": 9.483477592468262,
      "logits/rejected": 10.106366157531738,
      "logps/chosen": -0.2957404553890228,
      "logps/rejected": -0.40739065408706665,
      "loss": 0.8767,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.4436107575893402,
      "rewards/margins": 0.16747523844242096,
      "rewards/rejected": -0.6110859513282776,
      "step": 200
    },
    {
      "epoch": 0.17761989342806395,
      "eval_logits/chosen": 8.491498947143555,
      "eval_logits/rejected": 8.999146461486816,
      "eval_logps/chosen": -0.3135836124420166,
      "eval_logps/rejected": -0.4829566180706024,
      "eval_loss": 0.8664290904998779,
      "eval_rewards/accuracies": 0.6263736486434937,
      "eval_rewards/chosen": -0.4703753888607025,
      "eval_rewards/margins": 0.2540595233440399,
      "eval_rewards/rejected": -0.7244349122047424,
      "eval_runtime": 25.2553,
      "eval_samples_per_second": 28.826,
      "eval_steps_per_second": 3.603,
      "step": 200
    },
    {
      "epoch": 0.18650088809946713,
      "grad_norm": 0.27885496616363525,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": 7.234966278076172,
      "logits/rejected": 8.313450813293457,
      "logps/chosen": -0.29102542996406555,
      "logps/rejected": -0.49241799116134644,
      "loss": 0.8556,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.43653813004493713,
      "rewards/margins": 0.3020888566970825,
      "rewards/rejected": -0.7386269569396973,
      "step": 210
    },
    {
      "epoch": 0.19538188277087035,
      "grad_norm": 0.29907363653182983,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": 7.907521724700928,
      "logits/rejected": 8.253190994262695,
      "logps/chosen": -0.33691853284835815,
      "logps/rejected": -0.4829257130622864,
      "loss": 0.8236,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.5053777694702148,
      "rewards/margins": 0.21901080012321472,
      "rewards/rejected": -0.724388599395752,
      "step": 220
    },
    {
      "epoch": 0.20426287744227353,
      "grad_norm": 0.282474547624588,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": 6.367492198944092,
      "logits/rejected": 6.273728370666504,
      "logps/chosen": -0.3519875705242157,
      "logps/rejected": -0.5284813642501831,
      "loss": 0.8027,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.5279813408851624,
      "rewards/margins": 0.2647407650947571,
      "rewards/rejected": -0.7927221059799194,
      "step": 230
    },
    {
      "epoch": 0.21314387211367672,
      "grad_norm": 0.327765554189682,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": 5.090893268585205,
      "logits/rejected": 4.768380165100098,
      "logps/chosen": -0.3851698040962219,
      "logps/rejected": -0.6464222073554993,
      "loss": 0.7898,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.5777546167373657,
      "rewards/margins": 0.391878604888916,
      "rewards/rejected": -0.9696332812309265,
      "step": 240
    },
    {
      "epoch": 0.22202486678507993,
      "grad_norm": 0.4895865321159363,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": 4.056812286376953,
      "logits/rejected": 3.723601818084717,
      "logps/chosen": -0.4400455951690674,
      "logps/rejected": -0.7731422781944275,
      "loss": 0.7626,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.6600683927536011,
      "rewards/margins": 0.49964505434036255,
      "rewards/rejected": -1.1597135066986084,
      "step": 250
    },
    {
      "epoch": 0.22202486678507993,
      "eval_logits/chosen": 2.420060396194458,
      "eval_logits/rejected": 2.1626052856445312,
      "eval_logps/chosen": -0.4724067151546478,
      "eval_logps/rejected": -0.8418064117431641,
      "eval_loss": 0.7631083130836487,
      "eval_rewards/accuracies": 0.6483516693115234,
      "eval_rewards/chosen": -0.7086100578308105,
      "eval_rewards/margins": 0.5540997385978699,
      "eval_rewards/rejected": -1.2627097368240356,
      "eval_runtime": 25.2418,
      "eval_samples_per_second": 28.841,
      "eval_steps_per_second": 3.605,
      "step": 250
    },
    {
      "epoch": 0.23090586145648312,
      "grad_norm": 0.46291017532348633,
      "learning_rate": 4.638410650401267e-06,
      "logits/chosen": 1.5297390222549438,
      "logits/rejected": 1.1381648778915405,
      "logps/chosen": -0.4418027997016907,
      "logps/rejected": -1.0542564392089844,
      "loss": 0.7026,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.6627041697502136,
      "rewards/margins": 0.9186803698539734,
      "rewards/rejected": -1.5813844203948975,
      "step": 260
    },
    {
      "epoch": 0.23978685612788633,
      "grad_norm": 0.9783313870429993,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": 2.8311033248901367,
      "logits/rejected": 1.9742711782455444,
      "logps/chosen": -0.5430587530136108,
      "logps/rejected": -0.9841039776802063,
      "loss": 0.7317,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.814588189125061,
      "rewards/margins": 0.6615679860115051,
      "rewards/rejected": -1.4761559963226318,
      "step": 270
    },
    {
      "epoch": 0.24866785079928952,
      "grad_norm": 2.102562189102173,
      "learning_rate": 4.582303101775249e-06,
      "logits/chosen": 1.8241952657699585,
      "logits/rejected": 0.8777934312820435,
      "logps/chosen": -0.5624039769172668,
      "logps/rejected": -1.1460126638412476,
      "loss": 0.6887,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.8436058163642883,
      "rewards/margins": 0.8754131197929382,
      "rewards/rejected": -1.7190189361572266,
      "step": 280
    },
    {
      "epoch": 0.25754884547069273,
      "grad_norm": 0.9813026189804077,
      "learning_rate": 4.55287302283426e-06,
      "logits/chosen": 2.370732069015503,
      "logits/rejected": 1.4697134494781494,
      "logps/chosen": -0.6739786863327026,
      "logps/rejected": -1.6581566333770752,
      "loss": 0.5695,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.0109679698944092,
      "rewards/margins": 1.476266622543335,
      "rewards/rejected": -2.487234592437744,
      "step": 290
    },
    {
      "epoch": 0.2664298401420959,
      "grad_norm": 2.187314510345459,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": 1.6230781078338623,
      "logits/rejected": 0.5460122227668762,
      "logps/chosen": -0.6433733701705933,
      "logps/rejected": -2.1001811027526855,
      "loss": 0.5366,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.9650601148605347,
      "rewards/margins": 2.185211658477783,
      "rewards/rejected": -3.1502718925476074,
      "step": 300
    },
    {
      "epoch": 0.2664298401420959,
      "eval_logits/chosen": 1.4087599515914917,
      "eval_logits/rejected": 0.7888947129249573,
      "eval_logps/chosen": -0.7579545974731445,
      "eval_logps/rejected": -2.0049116611480713,
      "eval_loss": 0.551510214805603,
      "eval_rewards/accuracies": 0.6813187003135681,
      "eval_rewards/chosen": -1.1369318962097168,
      "eval_rewards/margins": 1.8704355955123901,
      "eval_rewards/rejected": -3.0073673725128174,
      "eval_runtime": 25.2647,
      "eval_samples_per_second": 28.815,
      "eval_steps_per_second": 3.602,
      "step": 300
    },
    {
      "epoch": 0.2753108348134991,
      "grad_norm": 0.7035408616065979,
      "learning_rate": 4.491324795060491e-06,
      "logits/chosen": 1.5831315517425537,
      "logits/rejected": 0.46730250120162964,
      "logps/chosen": -0.7262418866157532,
      "logps/rejected": -2.1209158897399902,
      "loss": 0.5524,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.0893628597259521,
      "rewards/margins": 2.092010974884033,
      "rewards/rejected": -3.1813735961914062,
      "step": 310
    },
    {
      "epoch": 0.2841918294849023,
      "grad_norm": 0.5678634643554688,
      "learning_rate": 4.4592336433146e-06,
      "logits/chosen": 1.265734076499939,
      "logits/rejected": 0.7576489448547363,
      "logps/chosen": -0.7938942313194275,
      "logps/rejected": -2.3495612144470215,
      "loss": 0.5233,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.1908413171768188,
      "rewards/margins": 2.333500385284424,
      "rewards/rejected": -3.5243420600891113,
      "step": 320
    },
    {
      "epoch": 0.29307282415630553,
      "grad_norm": 1.1373224258422852,
      "learning_rate": 4.426283106939474e-06,
      "logits/chosen": 2.977414846420288,
      "logits/rejected": 2.1573710441589355,
      "logps/chosen": -0.8513160943984985,
      "logps/rejected": -2.4125566482543945,
      "loss": 0.556,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.2769742012023926,
      "rewards/margins": 2.341860771179199,
      "rewards/rejected": -3.6188347339630127,
      "step": 330
    },
    {
      "epoch": 0.3019538188277087,
      "grad_norm": 4.7876176834106445,
      "learning_rate": 4.3924876391293915e-06,
      "logits/chosen": 2.4026589393615723,
      "logits/rejected": 1.207395315170288,
      "logps/chosen": -0.8529679179191589,
      "logps/rejected": -2.456879138946533,
      "loss": 0.5592,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -1.279451847076416,
      "rewards/margins": 2.4058666229248047,
      "rewards/rejected": -3.6853187084198,
      "step": 340
    },
    {
      "epoch": 0.3108348134991119,
      "grad_norm": 0.5053763389587402,
      "learning_rate": 4.357862063693486e-06,
      "logits/chosen": 2.434265375137329,
      "logits/rejected": 1.2504141330718994,
      "logps/chosen": -0.9489291310310364,
      "logps/rejected": -2.8521530628204346,
      "loss": 0.4737,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.423393726348877,
      "rewards/margins": 2.8548355102539062,
      "rewards/rejected": -4.278229236602783,
      "step": 350
    },
    {
      "epoch": 0.3108348134991119,
      "eval_logits/chosen": 1.6632592678070068,
      "eval_logits/rejected": 1.235045075416565,
      "eval_logps/chosen": -1.0692518949508667,
      "eval_logps/rejected": -2.7428486347198486,
      "eval_loss": 0.5021397471427917,
      "eval_rewards/accuracies": 0.692307710647583,
      "eval_rewards/chosen": -1.6038777828216553,
      "eval_rewards/margins": 2.510395050048828,
      "eval_rewards/rejected": -4.1142730712890625,
      "eval_runtime": 25.2582,
      "eval_samples_per_second": 28.822,
      "eval_steps_per_second": 3.603,
      "step": 350
    },
    {
      "epoch": 0.3197158081705151,
      "grad_norm": 0.8040274381637573,
      "learning_rate": 4.322421568553529e-06,
      "logits/chosen": 1.187036395072937,
      "logits/rejected": 0.4290788769721985,
      "logps/chosen": -1.1015206575393677,
      "logps/rejected": -2.919748544692993,
      "loss": 0.489,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -1.6522810459136963,
      "rewards/margins": 2.727341651916504,
      "rewards/rejected": -4.379622459411621,
      "step": 360
    },
    {
      "epoch": 0.3285968028419183,
      "grad_norm": 0.9299562573432922,
      "learning_rate": 4.286181699082008e-06,
      "logits/chosen": 2.5852127075195312,
      "logits/rejected": 2.0419259071350098,
      "logps/chosen": -1.1498607397079468,
      "logps/rejected": -3.0336194038391113,
      "loss": 0.4812,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.724791169166565,
      "rewards/margins": 2.8256375789642334,
      "rewards/rejected": -4.55042839050293,
      "step": 370
    },
    {
      "epoch": 0.33747779751332146,
      "grad_norm": 1.7739671468734741,
      "learning_rate": 4.249158351283414e-06,
      "logits/chosen": 2.246245861053467,
      "logits/rejected": 1.5551975965499878,
      "logps/chosen": -1.254900574684143,
      "logps/rejected": -3.206178665161133,
      "loss": 0.4651,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.8823509216308594,
      "rewards/margins": 2.926917552947998,
      "rewards/rejected": -4.809267997741699,
      "step": 380
    },
    {
      "epoch": 0.3463587921847247,
      "grad_norm": 4.380665302276611,
      "learning_rate": 4.211367764821722e-06,
      "logits/chosen": 3.0754549503326416,
      "logits/rejected": 2.622124433517456,
      "logps/chosen": -1.9250037670135498,
      "logps/rejected": -3.69482421875,
      "loss": 0.4292,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -2.8875060081481934,
      "rewards/margins": 2.6547305583953857,
      "rewards/rejected": -5.542236328125,
      "step": 390
    },
    {
      "epoch": 0.3552397868561279,
      "grad_norm": 1.5087212324142456,
      "learning_rate": 4.172826515897146e-06,
      "logits/chosen": 2.2718021869659424,
      "logits/rejected": 1.8861210346221924,
      "logps/chosen": -2.4473955631256104,
      "logps/rejected": -4.387387752532959,
      "loss": 0.3902,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -3.671093702316284,
      "rewards/margins": 2.9099888801574707,
      "rewards/rejected": -6.581082344055176,
      "step": 400
    },
    {
      "epoch": 0.3552397868561279,
      "eval_logits/chosen": 1.759078860282898,
      "eval_logits/rejected": 1.5246928930282593,
      "eval_logps/chosen": -2.720665454864502,
      "eval_logps/rejected": -4.613493919372559,
      "eval_loss": 0.4054907560348511,
      "eval_rewards/accuracies": 0.8791208863258362,
      "eval_rewards/chosen": -4.080998420715332,
      "eval_rewards/margins": 2.839242696762085,
      "eval_rewards/rejected": -6.920241355895996,
      "eval_runtime": 25.2363,
      "eval_samples_per_second": 28.847,
      "eval_steps_per_second": 3.606,
      "step": 400
    },
    {
      "epoch": 0.3641207815275311,
      "grad_norm": 6.079421043395996,
      "learning_rate": 4.133551509975264e-06,
      "logits/chosen": 1.8841949701309204,
      "logits/rejected": 1.3479797840118408,
      "logps/chosen": -2.517265796661377,
      "logps/rejected": -4.453648567199707,
      "loss": 0.3977,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -3.7758986949920654,
      "rewards/margins": 2.9045748710632324,
      "rewards/rejected": -6.680473327636719,
      "step": 410
    },
    {
      "epoch": 0.37300177619893427,
      "grad_norm": 3.0998194217681885,
      "learning_rate": 4.093559974371725e-06,
      "logits/chosen": 1.6409276723861694,
      "logits/rejected": 1.2141990661621094,
      "logps/chosen": -2.2561168670654297,
      "logps/rejected": -4.470211029052734,
      "loss": 0.3527,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -3.3841750621795654,
      "rewards/margins": 3.321141004562378,
      "rewards/rejected": -6.70531702041626,
      "step": 420
    },
    {
      "epoch": 0.38188277087033745,
      "grad_norm": 6.982161045074463,
      "learning_rate": 4.052869450695776e-06,
      "logits/chosen": 2.835188388824463,
      "logits/rejected": 2.3657329082489014,
      "logps/chosen": -2.8557300567626953,
      "logps/rejected": -5.075521469116211,
      "loss": 0.387,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -4.283595085144043,
      "rewards/margins": 3.3296875953674316,
      "rewards/rejected": -7.613282680511475,
      "step": 430
    },
    {
      "epoch": 0.3907637655417407,
      "grad_norm": 2.139338970184326,
      "learning_rate": 4.011497787155938e-06,
      "logits/chosen": 2.126509189605713,
      "logits/rejected": 1.459567904472351,
      "logps/chosen": -3.1412863731384277,
      "logps/rejected": -5.423466682434082,
      "loss": 0.3611,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -4.711928844451904,
      "rewards/margins": 3.4232699871063232,
      "rewards/rejected": -8.135198593139648,
      "step": 440
    },
    {
      "epoch": 0.3996447602131439,
      "grad_norm": 1.7899377346038818,
      "learning_rate": 3.969463130731183e-06,
      "logits/chosen": 2.4551379680633545,
      "logits/rejected": 2.0784289836883545,
      "logps/chosen": -3.098043203353882,
      "logps/rejected": -5.300747871398926,
      "loss": 0.354,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -4.647065162658691,
      "rewards/margins": 3.3040566444396973,
      "rewards/rejected": -7.951122283935547,
      "step": 450
    },
    {
      "epoch": 0.3996447602131439,
      "eval_logits/chosen": 1.9761625528335571,
      "eval_logits/rejected": 1.6654667854309082,
      "eval_logps/chosen": -2.8789772987365723,
      "eval_logps/rejected": -5.1105055809021,
      "eval_loss": 0.36211252212524414,
      "eval_rewards/accuracies": 0.8791208863258362,
      "eval_rewards/chosen": -4.3184661865234375,
      "eval_rewards/margins": 3.347292423248291,
      "eval_rewards/rejected": -7.6657586097717285,
      "eval_runtime": 25.2549,
      "eval_samples_per_second": 28.826,
      "eval_steps_per_second": 3.603,
      "step": 450
    },
    {
      "epoch": 0.40852575488454707,
      "grad_norm": 1.9171936511993408,
      "learning_rate": 3.92678391921108e-06,
      "logits/chosen": 2.1672446727752686,
      "logits/rejected": 1.6228408813476562,
      "logps/chosen": -2.688931703567505,
      "logps/rejected": -5.2408246994018555,
      "loss": 0.3266,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -4.033397197723389,
      "rewards/margins": 3.8278393745422363,
      "rewards/rejected": -7.861237525939941,
      "step": 460
    },
    {
      "epoch": 0.41740674955595025,
      "grad_norm": 1.702635407447815,
      "learning_rate": 3.88347887310836e-06,
      "logits/chosen": 2.3164448738098145,
      "logits/rejected": 2.047529697418213,
      "logps/chosen": -2.6861701011657715,
      "logps/rejected": -5.629918098449707,
      "loss": 0.3339,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -4.02925443649292,
      "rewards/margins": 4.415622711181641,
      "rewards/rejected": -8.444877624511719,
      "step": 470
    },
    {
      "epoch": 0.42628774422735344,
      "grad_norm": 2.48634934425354,
      "learning_rate": 3.839566987447492e-06,
      "logits/chosen": 2.5225472450256348,
      "logits/rejected": 2.0870003700256348,
      "logps/chosen": -3.041111946105957,
      "logps/rejected": -5.3499016761779785,
      "loss": 0.3226,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -4.561667442321777,
      "rewards/margins": 3.4631850719451904,
      "rewards/rejected": -8.024852752685547,
      "step": 480
    },
    {
      "epoch": 0.4351687388987567,
      "grad_norm": 4.728499412536621,
      "learning_rate": 3.795067523432826e-06,
      "logits/chosen": 2.33893084526062,
      "logits/rejected": 1.7909936904907227,
      "logps/chosen": -2.7356209754943848,
      "logps/rejected": -5.33417272567749,
      "loss": 0.322,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -4.103431224822998,
      "rewards/margins": 3.8978283405303955,
      "rewards/rejected": -8.001258850097656,
      "step": 490
    },
    {
      "epoch": 0.44404973357015987,
      "grad_norm": 8.412679672241211,
      "learning_rate": 3.7500000000000005e-06,
      "logits/chosen": 2.788668632507324,
      "logits/rejected": 2.439873695373535,
      "logps/chosen": -3.3219153881073,
      "logps/rejected": -5.992051124572754,
      "loss": 0.3075,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -4.98287296295166,
      "rewards/margins": 4.005204200744629,
      "rewards/rejected": -8.988077163696289,
      "step": 500
    },
    {
      "epoch": 0.44404973357015987,
      "eval_logits/chosen": 2.165436029434204,
      "eval_logits/rejected": 1.8186790943145752,
      "eval_logps/chosen": -3.4299349784851074,
      "eval_logps/rejected": -6.0660552978515625,
      "eval_loss": 0.3319137990474701,
      "eval_rewards/accuracies": 0.8901098966598511,
      "eval_rewards/chosen": -5.14490270614624,
      "eval_rewards/margins": 3.954181671142578,
      "eval_rewards/rejected": -9.09908390045166,
      "eval_runtime": 25.2602,
      "eval_samples_per_second": 28.82,
      "eval_steps_per_second": 3.603,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1781745164429558e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}