{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.3624173235480656,
  "eval_steps": 50,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.007248346470961312,
      "grad_norm": 0.07201674580574036,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": -1.9793262481689453,
      "logits/rejected": -2.5381760597229004,
      "logps/chosen": -0.28126341104507446,
      "logps/rejected": -0.3779803514480591,
      "loss": 7.3904,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.4218950867652893,
      "rewards/margins": 0.14507544040679932,
      "rewards/rejected": -0.5669704675674438,
      "step": 10
    },
    {
      "epoch": 0.014496692941922623,
      "grad_norm": 0.07562297582626343,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": -2.0567643642425537,
      "logits/rejected": -2.4989147186279297,
      "logps/chosen": -0.27690139412879944,
      "logps/rejected": -0.33544114232063293,
      "loss": 7.3756,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.41535210609436035,
      "rewards/margins": 0.08780960738658905,
      "rewards/rejected": -0.5031617283821106,
      "step": 20
    },
    {
      "epoch": 0.021745039412883936,
      "grad_norm": 0.09685570746660233,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": -2.10174298286438,
      "logits/rejected": -2.378197431564331,
      "logps/chosen": -0.26717427372932434,
      "logps/rejected": -0.30565372109413147,
      "loss": 7.451,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.4007614254951477,
      "rewards/margins": 0.05771917849779129,
      "rewards/rejected": -0.4584805369377136,
      "step": 30
    },
    {
      "epoch": 0.028993385883845247,
      "grad_norm": 0.08213861286640167,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": -2.1633317470550537,
      "logits/rejected": -2.387866497039795,
      "logps/chosen": -0.27634260058403015,
      "logps/rejected": -0.37035584449768066,
      "loss": 7.3892,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.4145139157772064,
      "rewards/margins": 0.14101983606815338,
      "rewards/rejected": -0.555533766746521,
      "step": 40
    },
    {
      "epoch": 0.03624173235480656,
      "grad_norm": 0.08846044540405273,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": -2.1402599811553955,
      "logits/rejected": -2.4459526538848877,
      "logps/chosen": -0.2535383999347687,
      "logps/rejected": -0.3090876042842865,
      "loss": 7.5171,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.3803076148033142,
      "rewards/margins": 0.08332376182079315,
      "rewards/rejected": -0.4636313319206238,
      "step": 50
    },
    {
      "epoch": 0.03624173235480656,
      "eval_logits/chosen": -2.1165692806243896,
      "eval_logits/rejected": -2.476428747177124,
      "eval_logps/chosen": -0.2828062176704407,
      "eval_logps/rejected": -0.3432886600494385,
      "eval_loss": 0.9120001792907715,
      "eval_rewards/accuracies": 0.5089285969734192,
      "eval_rewards/chosen": -0.4242093861103058,
      "eval_rewards/margins": 0.09072363376617432,
      "eval_rewards/rejected": -0.5149329900741577,
      "eval_runtime": 30.971,
      "eval_samples_per_second": 28.801,
      "eval_steps_per_second": 3.616,
      "step": 50
    },
    {
      "epoch": 0.04349007882576787,
      "grad_norm": 0.11753705143928528,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": -2.0575368404388428,
      "logits/rejected": -2.5456700325012207,
      "logps/chosen": -0.24622221291065216,
      "logps/rejected": -0.32402220368385315,
      "loss": 7.3926,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.36933332681655884,
      "rewards/margins": 0.11669999361038208,
      "rewards/rejected": -0.48603329062461853,
      "step": 60
    },
    {
      "epoch": 0.05073842529672919,
      "grad_norm": 0.09996571391820908,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": -1.9278606176376343,
      "logits/rejected": -2.4620182514190674,
      "logps/chosen": -0.2596542239189148,
      "logps/rejected": -0.3665880560874939,
      "loss": 7.185,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.3894812762737274,
      "rewards/margins": 0.160400852560997,
      "rewards/rejected": -0.5498821139335632,
      "step": 70
    },
    {
      "epoch": 0.057986771767690494,
      "grad_norm": 0.07287321239709854,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": -2.078031063079834,
      "logits/rejected": -2.461479663848877,
      "logps/chosen": -0.24513795971870422,
      "logps/rejected": -0.3448730707168579,
      "loss": 7.365,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.36770695447921753,
      "rewards/margins": 0.14960262179374695,
      "rewards/rejected": -0.5173095464706421,
      "step": 80
    },
    {
      "epoch": 0.0652351182386518,
      "grad_norm": 0.09656044095754623,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": -1.9979238510131836,
      "logits/rejected": -2.4381277561187744,
      "logps/chosen": -0.24047240614891052,
      "logps/rejected": -0.3277527987957001,
      "loss": 7.2664,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.36070865392684937,
      "rewards/margins": 0.13092057406902313,
      "rewards/rejected": -0.4916292130947113,
      "step": 90
    },
    {
      "epoch": 0.07248346470961312,
      "grad_norm": 0.08125138282775879,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": -1.8948112726211548,
      "logits/rejected": -2.4755520820617676,
      "logps/chosen": -0.20189261436462402,
      "logps/rejected": -0.29732149839401245,
      "loss": 7.2542,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.30283889174461365,
      "rewards/margins": 0.14314329624176025,
      "rewards/rejected": -0.44598227739334106,
      "step": 100
    },
    {
      "epoch": 0.07248346470961312,
      "eval_logits/chosen": -2.12322735786438,
      "eval_logits/rejected": -2.481174945831299,
      "eval_logps/chosen": -0.2438412606716156,
      "eval_logps/rejected": -0.3260033428668976,
      "eval_loss": 0.891861081123352,
      "eval_rewards/accuracies": 0.5625,
      "eval_rewards/chosen": -0.3657619059085846,
      "eval_rewards/margins": 0.12324309349060059,
      "eval_rewards/rejected": -0.48900502920150757,
      "eval_runtime": 30.3299,
      "eval_samples_per_second": 29.41,
      "eval_steps_per_second": 3.693,
      "step": 100
    },
    {
      "epoch": 0.07973181118057443,
      "grad_norm": 0.05962231010198593,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": -2.1134355068206787,
      "logits/rejected": -2.516538381576538,
      "logps/chosen": -0.237023264169693,
      "logps/rejected": -0.31476154923439026,
      "loss": 7.1749,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.3555349111557007,
      "rewards/margins": 0.11660744249820709,
      "rewards/rejected": -0.4721423089504242,
      "step": 110
    },
    {
      "epoch": 0.08698015765153574,
      "grad_norm": 0.06015922501683235,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": -2.0215041637420654,
      "logits/rejected": -2.4902031421661377,
      "logps/chosen": -0.1890055537223816,
      "logps/rejected": -0.3192065358161926,
      "loss": 7.142,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.2835083603858948,
      "rewards/margins": 0.19530144333839417,
      "rewards/rejected": -0.47880974411964417,
      "step": 120
    },
    {
      "epoch": 0.09422850412249706,
      "grad_norm": 0.06430571526288986,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": -2.0684752464294434,
      "logits/rejected": -2.510018825531006,
      "logps/chosen": -0.2073744535446167,
      "logps/rejected": -0.3121300935745239,
      "loss": 7.1438,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.31106168031692505,
      "rewards/margins": 0.15713343024253845,
      "rewards/rejected": -0.4681951403617859,
      "step": 130
    },
    {
      "epoch": 0.10147685059345837,
      "grad_norm": 0.08829955011606216,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": -2.026458263397217,
      "logits/rejected": -2.496157646179199,
      "logps/chosen": -0.19946983456611633,
      "logps/rejected": -0.32050156593322754,
      "loss": 7.1196,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.2992047667503357,
      "rewards/margins": 0.18154758214950562,
      "rewards/rejected": -0.4807523787021637,
      "step": 140
    },
    {
      "epoch": 0.10872519706441967,
      "grad_norm": 0.09773921221494675,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": -2.101029872894287,
      "logits/rejected": -2.5849032402038574,
      "logps/chosen": -0.18889756500720978,
      "logps/rejected": -0.36427801847457886,
      "loss": 7.1227,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.28334635496139526,
      "rewards/margins": 0.26307064294815063,
      "rewards/rejected": -0.5464169979095459,
      "step": 150
    },
    {
      "epoch": 0.10872519706441967,
      "eval_logits/chosen": -2.1945455074310303,
      "eval_logits/rejected": -2.559415578842163,
      "eval_logps/chosen": -0.22209034860134125,
      "eval_logps/rejected": -0.32476040720939636,
      "eval_loss": 0.8760393261909485,
      "eval_rewards/accuracies": 0.5803571343421936,
      "eval_rewards/chosen": -0.33313554525375366,
      "eval_rewards/margins": 0.15400508046150208,
      "eval_rewards/rejected": -0.48714062571525574,
      "eval_runtime": 30.3484,
      "eval_samples_per_second": 29.392,
      "eval_steps_per_second": 3.69,
      "step": 150
    },
    {
      "epoch": 0.11597354353538099,
      "grad_norm": 0.09142427891492844,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": -2.235069751739502,
      "logits/rejected": -2.5513346195220947,
      "logps/chosen": -0.19598741829395294,
      "logps/rejected": -0.2772120535373688,
      "loss": 7.0272,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.2939811050891876,
      "rewards/margins": 0.12183700501918793,
      "rewards/rejected": -0.41581812500953674,
      "step": 160
    },
    {
      "epoch": 0.1232218900063423,
      "grad_norm": 0.11735275387763977,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": -2.154263496398926,
      "logits/rejected": -2.512010335922241,
      "logps/chosen": -0.1798369437456131,
      "logps/rejected": -0.30910637974739075,
      "loss": 7.1603,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.26975542306900024,
      "rewards/margins": 0.19390416145324707,
      "rewards/rejected": -0.4636595845222473,
      "step": 170
    },
    {
      "epoch": 0.1304702364773036,
      "grad_norm": 0.09398588538169861,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": -2.292147159576416,
      "logits/rejected": -2.581425189971924,
      "logps/chosen": -0.18498703837394714,
      "logps/rejected": -0.31237050890922546,
      "loss": 6.995,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.2774805426597595,
      "rewards/margins": 0.19107523560523987,
      "rewards/rejected": -0.4685557782649994,
      "step": 180
    },
    {
      "epoch": 0.13771858294826492,
      "grad_norm": 0.13551996648311615,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": -2.274120330810547,
      "logits/rejected": -2.5797386169433594,
      "logps/chosen": -0.21168990433216095,
      "logps/rejected": -0.34927254915237427,
      "loss": 6.9941,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.317534863948822,
      "rewards/margins": 0.20637397468090057,
      "rewards/rejected": -0.5239088535308838,
      "step": 190
    },
    {
      "epoch": 0.14496692941922623,
      "grad_norm": 0.15515944361686707,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": -2.2116379737854004,
      "logits/rejected": -2.6693203449249268,
      "logps/chosen": -0.1971816122531891,
      "logps/rejected": -0.3498842120170593,
      "loss": 7.0413,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.2957724332809448,
      "rewards/margins": 0.22905388474464417,
      "rewards/rejected": -0.5248263478279114,
      "step": 200
    },
    {
      "epoch": 0.14496692941922623,
      "eval_logits/chosen": -2.3369696140289307,
      "eval_logits/rejected": -2.7298672199249268,
      "eval_logps/chosen": -0.22749511897563934,
      "eval_logps/rejected": -0.3613782525062561,
      "eval_loss": 0.8543878793716431,
      "eval_rewards/accuracies": 0.5714285969734192,
      "eval_rewards/chosen": -0.34124264121055603,
      "eval_rewards/margins": 0.2008247673511505,
      "eval_rewards/rejected": -0.5420674681663513,
      "eval_runtime": 30.3397,
      "eval_samples_per_second": 29.4,
      "eval_steps_per_second": 3.692,
      "step": 200
    },
    {
      "epoch": 0.15221527589018755,
      "grad_norm": 0.13039635121822357,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": -2.2575619220733643,
      "logits/rejected": -2.755174160003662,
      "logps/chosen": -0.199218288064003,
      "logps/rejected": -0.33260637521743774,
      "loss": 6.882,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.2988274097442627,
      "rewards/margins": 0.20008206367492676,
      "rewards/rejected": -0.49890947341918945,
      "step": 210
    },
    {
      "epoch": 0.15946362236114886,
      "grad_norm": 0.15020275115966797,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": -2.2949752807617188,
      "logits/rejected": -2.7288482189178467,
      "logps/chosen": -0.21987763047218323,
      "logps/rejected": -0.385539174079895,
      "loss": 6.9158,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.3298164904117584,
      "rewards/margins": 0.24849233031272888,
      "rewards/rejected": -0.5783087611198425,
      "step": 220
    },
    {
      "epoch": 0.16671196883211017,
      "grad_norm": 0.16104522347450256,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": -2.1837964057922363,
      "logits/rejected": -2.813563585281372,
      "logps/chosen": -0.2069036215543747,
      "logps/rejected": -0.3996518552303314,
      "loss": 6.8503,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.31035539507865906,
      "rewards/margins": 0.28912240266799927,
      "rewards/rejected": -0.5994777679443359,
      "step": 230
    },
    {
      "epoch": 0.1739603153030715,
      "grad_norm": 0.16533692181110382,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": -2.314570188522339,
      "logits/rejected": -2.8867621421813965,
      "logps/chosen": -0.18964803218841553,
      "logps/rejected": -0.3997463583946228,
      "loss": 6.667,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.2844720482826233,
      "rewards/margins": 0.3151474893093109,
      "rewards/rejected": -0.5996195077896118,
      "step": 240
    },
    {
      "epoch": 0.1812086617740328,
      "grad_norm": 0.24991220235824585,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": -2.46189546585083,
      "logits/rejected": -2.847446918487549,
      "logps/chosen": -0.24189543724060059,
      "logps/rejected": -0.4183991551399231,
      "loss": 6.7146,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.3628431558609009,
      "rewards/margins": 0.26475557684898376,
      "rewards/rejected": -0.6275987029075623,
      "step": 250
    },
    {
      "epoch": 0.1812086617740328,
      "eval_logits/chosen": -2.5230772495269775,
      "eval_logits/rejected": -2.9182300567626953,
      "eval_logps/chosen": -0.2819940149784088,
      "eval_logps/rejected": -0.48641347885131836,
      "eval_loss": 0.8070082664489746,
      "eval_rewards/accuracies": 0.5803571343421936,
      "eval_rewards/chosen": -0.4229910671710968,
      "eval_rewards/margins": 0.3066291809082031,
      "eval_rewards/rejected": -0.7296201586723328,
      "eval_runtime": 30.3523,
      "eval_samples_per_second": 29.388,
      "eval_steps_per_second": 3.69,
      "step": 250
    },
    {
      "epoch": 0.18845700824499412,
      "grad_norm": 0.3010155260562897,
      "learning_rate": 4.638410650401267e-06,
      "logits/chosen": -2.4178171157836914,
      "logits/rejected": -2.914424419403076,
      "logps/chosen": -0.24933743476867676,
      "logps/rejected": -0.4757954478263855,
      "loss": 6.5743,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.3740061819553375,
      "rewards/margins": 0.33968693017959595,
      "rewards/rejected": -0.7136931419372559,
      "step": 260
    },
    {
      "epoch": 0.19570535471595543,
      "grad_norm": 0.3765665888786316,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": -2.407275676727295,
      "logits/rejected": -2.886014938354492,
      "logps/chosen": -0.27837619185447693,
      "logps/rejected": -0.6118712425231934,
      "loss": 6.4931,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.417564332485199,
      "rewards/margins": 0.5002425909042358,
      "rewards/rejected": -0.9178068041801453,
      "step": 270
    },
    {
      "epoch": 0.20295370118691675,
      "grad_norm": 0.29865291714668274,
      "learning_rate": 4.582303101775249e-06,
      "logits/chosen": -2.5404791831970215,
      "logits/rejected": -2.861341953277588,
      "logps/chosen": -0.3124132752418518,
      "logps/rejected": -0.558597207069397,
      "loss": 6.2272,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.4686199128627777,
      "rewards/margins": 0.3692759573459625,
      "rewards/rejected": -0.8378958702087402,
      "step": 280
    },
    {
      "epoch": 0.21020204765787803,
      "grad_norm": 0.4311545789241791,
      "learning_rate": 4.55287302283426e-06,
      "logits/chosen": -2.393683910369873,
      "logits/rejected": -2.833466053009033,
      "logps/chosen": -0.3574284017086029,
      "logps/rejected": -0.8571383357048035,
      "loss": 5.7937,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.5361425876617432,
      "rewards/margins": 0.7495648264884949,
      "rewards/rejected": -1.2857074737548828,
      "step": 290
    },
    {
      "epoch": 0.21745039412883935,
      "grad_norm": 0.7148597836494446,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": -2.369452476501465,
      "logits/rejected": -2.7431299686431885,
      "logps/chosen": -0.46852874755859375,
      "logps/rejected": -0.9673423767089844,
      "loss": 5.741,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.7027931213378906,
      "rewards/margins": 0.7482204437255859,
      "rewards/rejected": -1.4510136842727661,
      "step": 300
    },
    {
      "epoch": 0.21745039412883935,
      "eval_logits/chosen": -2.5259616374969482,
      "eval_logits/rejected": -2.8154189586639404,
      "eval_logps/chosen": -0.5109023451805115,
      "eval_logps/rejected": -0.9421015381813049,
      "eval_loss": 0.6990054845809937,
      "eval_rewards/accuracies": 0.5714285969734192,
      "eval_rewards/chosen": -0.7663536071777344,
      "eval_rewards/margins": 0.6467987895011902,
      "eval_rewards/rejected": -1.4131524562835693,
      "eval_runtime": 30.3501,
      "eval_samples_per_second": 29.39,
      "eval_steps_per_second": 3.69,
      "step": 300
    },
    {
      "epoch": 0.22469874059980066,
      "grad_norm": 0.37795087695121765,
      "learning_rate": 4.491324795060491e-06,
      "logits/chosen": -2.5089354515075684,
      "logits/rejected": -2.827730894088745,
      "logps/chosen": -0.4125432074069977,
      "logps/rejected": -1.0391974449157715,
      "loss": 5.3005,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.6188148260116577,
      "rewards/margins": 0.9399812817573547,
      "rewards/rejected": -1.5587961673736572,
      "step": 310
    },
    {
      "epoch": 0.23194708707076198,
      "grad_norm": 0.38493892550468445,
      "learning_rate": 4.4592336433146e-06,
      "logits/chosen": -2.3596529960632324,
      "logits/rejected": -2.8088250160217285,
      "logps/chosen": -0.5338706970214844,
      "logps/rejected": -1.2868502140045166,
      "loss": 5.2832,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.8008060455322266,
      "rewards/margins": 1.1294692754745483,
      "rewards/rejected": -1.930275321006775,
      "step": 320
    },
    {
      "epoch": 0.2391954335417233,
      "grad_norm": 0.6237270832061768,
      "learning_rate": 4.426283106939474e-06,
      "logits/chosen": -2.5265612602233887,
      "logits/rejected": -2.7830424308776855,
      "logps/chosen": -0.6278705596923828,
      "logps/rejected": -1.4297993183135986,
      "loss": 5.1977,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.9418058395385742,
      "rewards/margins": 1.2028930187225342,
      "rewards/rejected": -2.1446988582611084,
      "step": 330
    },
    {
      "epoch": 0.2464437800126846,
      "grad_norm": 0.9095363020896912,
      "learning_rate": 4.3924876391293915e-06,
      "logits/chosen": -2.5918514728546143,
      "logits/rejected": -2.8718667030334473,
      "logps/chosen": -0.6412171125411987,
      "logps/rejected": -1.508195161819458,
      "loss": 4.7181,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.9618256688117981,
      "rewards/margins": 1.3004668951034546,
      "rewards/rejected": -2.2622926235198975,
      "step": 340
    },
    {
      "epoch": 0.2536921264836459,
      "grad_norm": 0.6144569516181946,
      "learning_rate": 4.357862063693486e-06,
      "logits/chosen": -2.492586612701416,
      "logits/rejected": -2.9216372966766357,
      "logps/chosen": -0.7246330976486206,
      "logps/rejected": -2.0466060638427734,
      "loss": 4.6111,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.0869497060775757,
      "rewards/margins": 1.9829593896865845,
      "rewards/rejected": -3.06990909576416,
      "step": 350
    },
    {
      "epoch": 0.2536921264836459,
      "eval_logits/chosen": -2.7258641719818115,
      "eval_logits/rejected": -2.9883599281311035,
      "eval_logps/chosen": -0.7502545118331909,
      "eval_logps/rejected": -1.7796010971069336,
      "eval_loss": 0.571855366230011,
      "eval_rewards/accuracies": 0.5892857313156128,
      "eval_rewards/chosen": -1.1253817081451416,
      "eval_rewards/margins": 1.5440199375152588,
      "eval_rewards/rejected": -2.6694016456604004,
      "eval_runtime": 30.3197,
      "eval_samples_per_second": 29.42,
      "eval_steps_per_second": 3.694,
      "step": 350
    },
    {
      "epoch": 0.2609404729546072,
      "grad_norm": 0.6816007494926453,
      "learning_rate": 4.322421568553529e-06,
      "logits/chosen": -2.6618990898132324,
      "logits/rejected": -2.9197638034820557,
      "logps/chosen": -0.7783921360969543,
      "logps/rejected": -1.9845831394195557,
      "loss": 4.8991,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.1675881147384644,
      "rewards/margins": 1.8092864751815796,
      "rewards/rejected": -2.976874589920044,
      "step": 360
    },
    {
      "epoch": 0.26818881942556855,
      "grad_norm": 0.5186926126480103,
      "learning_rate": 4.286181699082008e-06,
      "logits/chosen": -2.690049171447754,
      "logits/rejected": -2.9129927158355713,
      "logps/chosen": -0.7160422205924988,
      "logps/rejected": -2.097799777984619,
      "loss": 4.6471,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -1.0740633010864258,
      "rewards/margins": 2.0726354122161865,
      "rewards/rejected": -3.1466989517211914,
      "step": 370
    },
    {
      "epoch": 0.27543716589652983,
      "grad_norm": 0.7122822403907776,
      "learning_rate": 4.249158351283414e-06,
      "logits/chosen": -2.6984095573425293,
      "logits/rejected": -2.8411834239959717,
      "logps/chosen": -0.724581778049469,
      "logps/rejected": -2.1322455406188965,
      "loss": 4.4326,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -1.0868725776672363,
      "rewards/margins": 2.1114957332611084,
      "rewards/rejected": -3.198368549346924,
      "step": 380
    },
    {
      "epoch": 0.2826855123674912,
      "grad_norm": 0.610661506652832,
      "learning_rate": 4.211367764821722e-06,
      "logits/chosen": -2.5932717323303223,
      "logits/rejected": -2.855365514755249,
      "logps/chosen": -0.6430361866950989,
      "logps/rejected": -2.8457767963409424,
      "loss": 4.3874,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.9645543098449707,
      "rewards/margins": 3.3041110038757324,
      "rewards/rejected": -4.268665313720703,
      "step": 390
    },
    {
      "epoch": 0.28993385883845246,
      "grad_norm": 0.627453088760376,
      "learning_rate": 4.172826515897146e-06,
      "logits/chosen": -2.46079683303833,
      "logits/rejected": -2.839109182357788,
      "logps/chosen": -0.8076715469360352,
      "logps/rejected": -2.6943671703338623,
      "loss": 4.2388,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.2115072011947632,
      "rewards/margins": 2.8300435543060303,
      "rewards/rejected": -4.041550636291504,
      "step": 400
    },
    {
      "epoch": 0.28993385883845246,
      "eval_logits/chosen": -2.5736935138702393,
      "eval_logits/rejected": -2.8549301624298096,
      "eval_logps/chosen": -0.8027606010437012,
      "eval_logps/rejected": -2.407195568084717,
      "eval_loss": 0.49557629227638245,
      "eval_rewards/accuracies": 0.6071428656578064,
      "eval_rewards/chosen": -1.2041409015655518,
      "eval_rewards/margins": 2.4066522121429443,
      "eval_rewards/rejected": -3.6107935905456543,
      "eval_runtime": 30.3144,
      "eval_samples_per_second": 29.425,
      "eval_steps_per_second": 3.695,
      "step": 400
    },
    {
      "epoch": 0.2971822053094138,
      "grad_norm": 0.582290768623352,
      "learning_rate": 4.133551509975264e-06,
      "logits/chosen": -2.5232419967651367,
      "logits/rejected": -2.8139169216156006,
      "logps/chosen": -0.7921724319458008,
      "logps/rejected": -2.5887393951416016,
      "loss": 3.9161,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.1882586479187012,
      "rewards/margins": 2.694850206375122,
      "rewards/rejected": -3.883108615875244,
      "step": 410
    },
    {
      "epoch": 0.3044305517803751,
      "grad_norm": 1.0708731412887573,
      "learning_rate": 4.093559974371725e-06,
      "logits/chosen": -2.402315616607666,
      "logits/rejected": -2.6774837970733643,
      "logps/chosen": -0.9347984194755554,
      "logps/rejected": -3.4818978309631348,
      "loss": 3.8304,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.4021978378295898,
      "rewards/margins": 3.8206489086151123,
      "rewards/rejected": -5.222846508026123,
      "step": 420
    },
    {
      "epoch": 0.31167889825133643,
      "grad_norm": 0.6126013398170471,
      "learning_rate": 4.052869450695776e-06,
      "logits/chosen": -2.4616169929504395,
      "logits/rejected": -2.791163921356201,
      "logps/chosen": -1.073099970817566,
      "logps/rejected": -3.7586822509765625,
      "loss": 3.7662,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -1.609649896621704,
      "rewards/margins": 4.028374671936035,
      "rewards/rejected": -5.63802433013916,
      "step": 430
    },
    {
      "epoch": 0.3189272447222977,
      "grad_norm": 0.6995553374290466,
      "learning_rate": 4.011497787155938e-06,
      "logits/chosen": -2.350310802459717,
      "logits/rejected": -2.750123977661133,
      "logps/chosen": -1.2352540493011475,
      "logps/rejected": -4.180838584899902,
      "loss": 3.4493,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -1.852880835533142,
      "rewards/margins": 4.418376922607422,
      "rewards/rejected": -6.271258354187012,
      "step": 440
    },
    {
      "epoch": 0.32617559119325906,
      "grad_norm": 0.7922266721725464,
      "learning_rate": 3.969463130731183e-06,
      "logits/chosen": -2.368863344192505,
      "logits/rejected": -2.7451913356781006,
      "logps/chosen": -1.0907175540924072,
      "logps/rejected": -4.085494041442871,
      "loss": 4.0187,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -1.6360763311386108,
      "rewards/margins": 4.4921650886535645,
      "rewards/rejected": -6.128241062164307,
      "step": 450
    },
    {
      "epoch": 0.32617559119325906,
      "eval_logits/chosen": -2.44749116897583,
      "eval_logits/rejected": -2.7382781505584717,
      "eval_logps/chosen": -1.3378121852874756,
      "eval_logps/rejected": -3.6518471240997314,
      "eval_loss": 0.45260879397392273,
      "eval_rewards/accuracies": 0.6428571343421936,
      "eval_rewards/chosen": -2.006718158721924,
      "eval_rewards/margins": 3.4710519313812256,
      "eval_rewards/rejected": -5.4777703285217285,
      "eval_runtime": 30.3269,
      "eval_samples_per_second": 29.413,
      "eval_steps_per_second": 3.693,
      "step": 450
    },
    {
      "epoch": 0.33342393766422035,
      "grad_norm": 0.7150818109512329,
      "learning_rate": 3.92678391921108e-06,
      "logits/chosen": -2.4681639671325684,
      "logits/rejected": -2.797795057296753,
      "logps/chosen": -1.4199602603912354,
      "logps/rejected": -4.342296600341797,
      "loss": 3.5788,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -2.1299405097961426,
      "rewards/margins": 4.3835039138793945,
      "rewards/rejected": -6.513444423675537,
      "step": 460
    },
    {
      "epoch": 0.34067228413518164,
      "grad_norm": 1.260711431503296,
      "learning_rate": 3.88347887310836e-06,
      "logits/chosen": -2.4320380687713623,
      "logits/rejected": -2.749352216720581,
      "logps/chosen": -1.318566918373108,
      "logps/rejected": -3.7736339569091797,
      "loss": 3.5681,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.977850317955017,
      "rewards/margins": 3.682600498199463,
      "rewards/rejected": -5.6604509353637695,
      "step": 470
    },
    {
      "epoch": 0.347920630606143,
      "grad_norm": 0.9010512828826904,
      "learning_rate": 3.839566987447492e-06,
      "logits/chosen": -2.3840298652648926,
      "logits/rejected": -2.7868971824645996,
      "logps/chosen": -1.6358540058135986,
      "logps/rejected": -5.068122386932373,
      "loss": 3.2884,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -2.4537811279296875,
      "rewards/margins": 5.148402690887451,
      "rewards/rejected": -7.602184295654297,
      "step": 480
    },
    {
      "epoch": 0.35516897707710426,
      "grad_norm": 0.4977961778640747,
      "learning_rate": 3.795067523432826e-06,
      "logits/chosen": -2.4045886993408203,
      "logits/rejected": -2.813812494277954,
      "logps/chosen": -1.5761570930480957,
      "logps/rejected": -5.026966094970703,
      "loss": 3.5505,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -2.3642354011535645,
      "rewards/margins": 5.176213264465332,
      "rewards/rejected": -7.5404486656188965,
      "step": 490
    },
    {
      "epoch": 0.3624173235480656,
      "grad_norm": 0.9888324737548828,
      "learning_rate": 3.7500000000000005e-06,
      "logits/chosen": -2.4877827167510986,
      "logits/rejected": -2.7923107147216797,
      "logps/chosen": -2.0706987380981445,
      "logps/rejected": -5.011548042297363,
      "loss": 3.3992,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -3.106048107147217,
      "rewards/margins": 4.411273002624512,
      "rewards/rejected": -7.517321586608887,
      "step": 500
    },
    {
      "epoch": 0.3624173235480656,
      "eval_logits/chosen": -2.5850253105163574,
      "eval_logits/rejected": -2.898167133331299,
      "eval_logps/chosen": -2.3814480304718018,
      "eval_logps/rejected": -5.132807731628418,
      "eval_loss": 0.40332508087158203,
      "eval_rewards/accuracies": 0.7142857313156128,
      "eval_rewards/chosen": -3.572171926498413,
      "eval_rewards/margins": 4.127039432525635,
      "eval_rewards/rejected": -7.699211597442627,
      "eval_runtime": 30.324,
      "eval_samples_per_second": 29.416,
      "eval_steps_per_second": 3.693,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.8549061636493148e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}