{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.6349206349206349,
  "eval_steps": 50,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012698412698412698,
      "grad_norm": 0.04658036306500435,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 15.106437683105469,
      "logits/rejected": 15.158523559570312,
      "logps/chosen": -0.30069679021835327,
      "logps/rejected": -0.30243945121765137,
      "loss": 0.9981,
      "rewards/accuracies": 0.3499999940395355,
      "rewards/chosen": -0.45104512572288513,
      "rewards/margins": 0.002613987773656845,
      "rewards/rejected": -0.45365914702415466,
      "step": 10
    },
    {
      "epoch": 0.025396825396825397,
      "grad_norm": 0.05173320695757866,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 14.680102348327637,
      "logits/rejected": 14.592491149902344,
      "logps/chosen": -0.3225177228450775,
      "logps/rejected": -0.27230435609817505,
      "loss": 0.9967,
      "rewards/accuracies": 0.26249998807907104,
      "rewards/chosen": -0.4837765693664551,
      "rewards/margins": -0.07532001286745071,
      "rewards/rejected": -0.40845656394958496,
      "step": 20
    },
    {
      "epoch": 0.0380952380952381,
      "grad_norm": 0.04962443560361862,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 15.399703979492188,
      "logits/rejected": 15.089459419250488,
      "logps/chosen": -0.2997470498085022,
      "logps/rejected": -0.28447264432907104,
      "loss": 0.9988,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": -0.4496205747127533,
      "rewards/margins": -0.022911589592695236,
      "rewards/rejected": -0.42670899629592896,
      "step": 30
    },
    {
      "epoch": 0.050793650793650794,
      "grad_norm": 0.05171326920390129,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 15.099847793579102,
      "logits/rejected": 15.159170150756836,
      "logps/chosen": -0.29869550466537476,
      "logps/rejected": -0.2806033790111542,
      "loss": 0.9841,
      "rewards/accuracies": 0.375,
      "rewards/chosen": -0.44804325699806213,
      "rewards/margins": -0.02713816799223423,
      "rewards/rejected": -0.42090511322021484,
      "step": 40
    },
    {
      "epoch": 0.06349206349206349,
      "grad_norm": 0.048664532601833344,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 15.207334518432617,
      "logits/rejected": 15.135488510131836,
      "logps/chosen": -0.3121686577796936,
      "logps/rejected": -0.3194735050201416,
      "loss": 0.9953,
      "rewards/accuracies": 0.3499999940395355,
      "rewards/chosen": -0.468252956867218,
      "rewards/margins": 0.010957291349768639,
      "rewards/rejected": -0.47921022772789,
      "step": 50
    },
    {
      "epoch": 0.06349206349206349,
      "eval_logits/chosen": 15.441752433776855,
      "eval_logits/rejected": 15.288756370544434,
      "eval_logps/chosen": -0.33557233214378357,
      "eval_logps/rejected": -0.31198158860206604,
      "eval_loss": 0.9914231300354004,
      "eval_rewards/accuracies": 0.3125,
      "eval_rewards/chosen": -0.5033585429191589,
      "eval_rewards/margins": -0.03538615256547928,
      "eval_rewards/rejected": -0.46797239780426025,
      "eval_runtime": 19.0844,
      "eval_samples_per_second": 26.723,
      "eval_steps_per_second": 3.354,
      "step": 50
    },
    {
      "epoch": 0.0761904761904762,
      "grad_norm": 0.0722479596734047,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": 15.281786918640137,
      "logits/rejected": 15.110156059265137,
      "logps/chosen": -0.30892473459243774,
      "logps/rejected": -0.26251715421676636,
      "loss": 1.0022,
      "rewards/accuracies": 0.25,
      "rewards/chosen": -0.4633871018886566,
      "rewards/margins": -0.0696113258600235,
      "rewards/rejected": -0.3937757611274719,
      "step": 60
    },
    {
      "epoch": 0.08888888888888889,
      "grad_norm": 0.07008200883865356,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": 14.836176872253418,
      "logits/rejected": 14.98499584197998,
      "logps/chosen": -0.29483428597450256,
      "logps/rejected": -0.29870957136154175,
      "loss": 0.9849,
      "rewards/accuracies": 0.4000000059604645,
      "rewards/chosen": -0.44225144386291504,
      "rewards/margins": 0.0058129094541072845,
      "rewards/rejected": -0.44806432723999023,
      "step": 70
    },
    {
      "epoch": 0.10158730158730159,
      "grad_norm": 0.06503555178642273,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": 15.508198738098145,
      "logits/rejected": 15.592549324035645,
      "logps/chosen": -0.3066270351409912,
      "logps/rejected": -0.2757572531700134,
      "loss": 0.9886,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": -0.45994052290916443,
      "rewards/margins": -0.046304650604724884,
      "rewards/rejected": -0.41363590955734253,
      "step": 80
    },
    {
      "epoch": 0.11428571428571428,
      "grad_norm": 0.10515156388282776,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": 15.301602363586426,
      "logits/rejected": 15.421157836914062,
      "logps/chosen": -0.31223705410957336,
      "logps/rejected": -0.29449179768562317,
      "loss": 0.981,
      "rewards/accuracies": 0.2750000059604645,
      "rewards/chosen": -0.46835556626319885,
      "rewards/margins": -0.02661792002618313,
      "rewards/rejected": -0.44173765182495117,
      "step": 90
    },
    {
      "epoch": 0.12698412698412698,
      "grad_norm": 0.05785346403717995,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": 16.00307846069336,
      "logits/rejected": 15.64977741241455,
      "logps/chosen": -0.32283931970596313,
      "logps/rejected": -0.28576889634132385,
      "loss": 0.9883,
      "rewards/accuracies": 0.2750000059604645,
      "rewards/chosen": -0.4842589795589447,
      "rewards/margins": -0.055605631321668625,
      "rewards/rejected": -0.42865338921546936,
      "step": 100
    },
    {
      "epoch": 0.12698412698412698,
      "eval_logits/chosen": 15.624425888061523,
      "eval_logits/rejected": 15.548928260803223,
      "eval_logps/chosen": -0.33450835943222046,
      "eval_logps/rejected": -0.31935107707977295,
      "eval_loss": 0.9836427569389343,
      "eval_rewards/accuracies": 0.359375,
      "eval_rewards/chosen": -0.5017625689506531,
      "eval_rewards/margins": -0.022735953330993652,
      "eval_rewards/rejected": -0.4790266156196594,
      "eval_runtime": 18.6768,
      "eval_samples_per_second": 27.307,
      "eval_steps_per_second": 3.427,
      "step": 100
    },
    {
      "epoch": 0.13968253968253969,
      "grad_norm": 0.08603859692811966,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": 15.40850830078125,
      "logits/rejected": 15.258935928344727,
      "logps/chosen": -0.3209790587425232,
      "logps/rejected": -0.29926618933677673,
      "loss": 0.9852,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": -0.4814685881137848,
      "rewards/margins": -0.03256931155920029,
      "rewards/rejected": -0.4488992691040039,
      "step": 110
    },
    {
      "epoch": 0.1523809523809524,
      "grad_norm": 0.13407552242279053,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": 15.610095024108887,
      "logits/rejected": 15.964601516723633,
      "logps/chosen": -0.2977743446826935,
      "logps/rejected": -0.3102283179759979,
      "loss": 0.9839,
      "rewards/accuracies": 0.38749998807907104,
      "rewards/chosen": -0.4466615617275238,
      "rewards/margins": 0.018680967390537262,
      "rewards/rejected": -0.4653424620628357,
      "step": 120
    },
    {
      "epoch": 0.16507936507936508,
      "grad_norm": 0.1397980898618698,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": 15.487627983093262,
      "logits/rejected": 15.767982482910156,
      "logps/chosen": -0.2769243121147156,
      "logps/rejected": -0.3168947100639343,
      "loss": 0.9737,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.415386438369751,
      "rewards/margins": 0.05995568633079529,
      "rewards/rejected": -0.47534212470054626,
      "step": 130
    },
    {
      "epoch": 0.17777777777777778,
      "grad_norm": 0.09849797189235687,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": 15.779914855957031,
      "logits/rejected": 15.66816234588623,
      "logps/chosen": -0.2959491014480591,
      "logps/rejected": -0.3029848635196686,
      "loss": 0.9804,
      "rewards/accuracies": 0.375,
      "rewards/chosen": -0.4439236521720886,
      "rewards/margins": 0.010553586296737194,
      "rewards/rejected": -0.4544772207736969,
      "step": 140
    },
    {
      "epoch": 0.19047619047619047,
      "grad_norm": 0.08089074492454529,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": 15.029818534851074,
      "logits/rejected": 15.431653022766113,
      "logps/chosen": -0.2956623435020447,
      "logps/rejected": -0.3162347376346588,
      "loss": 0.9728,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.443493515253067,
      "rewards/margins": 0.030858617275953293,
      "rewards/rejected": -0.4743521809577942,
      "step": 150
    },
    {
      "epoch": 0.19047619047619047,
      "eval_logits/chosen": 15.575506210327148,
      "eval_logits/rejected": 15.54050064086914,
      "eval_logps/chosen": -0.3363308906555176,
      "eval_logps/rejected": -0.3436908721923828,
      "eval_loss": 0.9609583616256714,
      "eval_rewards/accuracies": 0.4375,
      "eval_rewards/chosen": -0.5044962763786316,
      "eval_rewards/margins": 0.01103996392339468,
      "eval_rewards/rejected": -0.5155363082885742,
      "eval_runtime": 18.6082,
      "eval_samples_per_second": 27.407,
      "eval_steps_per_second": 3.439,
      "step": 150
    },
    {
      "epoch": 0.20317460317460317,
      "grad_norm": 0.12168499082326889,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": 15.47050952911377,
      "logits/rejected": 15.637664794921875,
      "logps/chosen": -0.32601848244667053,
      "logps/rejected": -0.35739919543266296,
      "loss": 0.9531,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.489027738571167,
      "rewards/margins": 0.04707105830311775,
      "rewards/rejected": -0.5360987782478333,
      "step": 160
    },
    {
      "epoch": 0.21587301587301588,
      "grad_norm": 0.12861700356006622,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": 15.594339370727539,
      "logits/rejected": 15.680140495300293,
      "logps/chosen": -0.29271024465560913,
      "logps/rejected": -0.3383347690105438,
      "loss": 0.9386,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.4390653669834137,
      "rewards/margins": 0.06843684613704681,
      "rewards/rejected": -0.5075021982192993,
      "step": 170
    },
    {
      "epoch": 0.22857142857142856,
      "grad_norm": 0.10320646315813065,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": 15.644658088684082,
      "logits/rejected": 15.526695251464844,
      "logps/chosen": -0.30950039625167847,
      "logps/rejected": -0.33660295605659485,
      "loss": 0.9313,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": -0.46425050497055054,
      "rewards/margins": 0.04065385088324547,
      "rewards/rejected": -0.5049043893814087,
      "step": 180
    },
    {
      "epoch": 0.24126984126984127,
      "grad_norm": 0.29178574681282043,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": 15.795066833496094,
      "logits/rejected": 15.640788078308105,
      "logps/chosen": -0.34224197268486023,
      "logps/rejected": -0.34523850679397583,
      "loss": 0.9406,
      "rewards/accuracies": 0.375,
      "rewards/chosen": -0.5133630037307739,
      "rewards/margins": 0.0044947536662220955,
      "rewards/rejected": -0.5178577303886414,
      "step": 190
    },
    {
      "epoch": 0.25396825396825395,
      "grad_norm": 0.15747429430484772,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": 15.24070930480957,
      "logits/rejected": 15.132087707519531,
      "logps/chosen": -0.3026728630065918,
      "logps/rejected": -0.3765440583229065,
      "loss": 0.9031,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.4540092945098877,
      "rewards/margins": 0.11080671846866608,
      "rewards/rejected": -0.5648160576820374,
      "step": 200
    },
    {
      "epoch": 0.25396825396825395,
      "eval_logits/chosen": 15.073077201843262,
      "eval_logits/rejected": 15.098322868347168,
      "eval_logps/chosen": -0.3540771007537842,
      "eval_logps/rejected": -0.41381165385246277,
      "eval_loss": 0.9153187274932861,
      "eval_rewards/accuracies": 0.5,
      "eval_rewards/chosen": -0.5311156511306763,
      "eval_rewards/margins": 0.08960187435150146,
      "eval_rewards/rejected": -0.6207175254821777,
      "eval_runtime": 18.5936,
      "eval_samples_per_second": 27.429,
      "eval_steps_per_second": 3.442,
      "step": 200
    },
    {
      "epoch": 0.26666666666666666,
      "grad_norm": 0.9226244688034058,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": 15.300783157348633,
      "logits/rejected": 15.6528902053833,
      "logps/chosen": -0.3110392093658447,
      "logps/rejected": -0.4790540635585785,
      "loss": 0.8977,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.4665588438510895,
      "rewards/margins": 0.25202232599258423,
      "rewards/rejected": -0.7185810804367065,
      "step": 210
    },
    {
      "epoch": 0.27936507936507937,
      "grad_norm": 0.187363401055336,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": 15.123028755187988,
      "logits/rejected": 14.940861701965332,
      "logps/chosen": -0.33024150133132935,
      "logps/rejected": -0.35755541920661926,
      "loss": 0.9074,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.49536222219467163,
      "rewards/margins": 0.04097090661525726,
      "rewards/rejected": -0.5363331437110901,
      "step": 220
    },
    {
      "epoch": 0.2920634920634921,
      "grad_norm": 0.27345994114875793,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": 14.54762077331543,
      "logits/rejected": 14.207303047180176,
      "logps/chosen": -0.2951691150665283,
      "logps/rejected": -0.41360992193222046,
      "loss": 0.896,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.4427536427974701,
      "rewards/margins": 0.1776612401008606,
      "rewards/rejected": -0.6204149723052979,
      "step": 230
    },
    {
      "epoch": 0.3047619047619048,
      "grad_norm": 0.20160575211048126,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": 14.768750190734863,
      "logits/rejected": 14.940885543823242,
      "logps/chosen": -0.3044833838939667,
      "logps/rejected": -0.4275297224521637,
      "loss": 0.891,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.4567251205444336,
      "rewards/margins": 0.18456946313381195,
      "rewards/rejected": -0.6412945985794067,
      "step": 240
    },
    {
      "epoch": 0.31746031746031744,
      "grad_norm": 0.9059060215950012,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": 14.839933395385742,
      "logits/rejected": 14.69981861114502,
      "logps/chosen": -0.3137063980102539,
      "logps/rejected": -0.49661844968795776,
      "loss": 0.8621,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.47055959701538086,
      "rewards/margins": 0.2743679881095886,
      "rewards/rejected": -0.7449275851249695,
      "step": 250
    },
    {
      "epoch": 0.31746031746031744,
      "eval_logits/chosen": 14.19374942779541,
      "eval_logits/rejected": 14.215425491333008,
      "eval_logps/chosen": -0.3999108076095581,
      "eval_logps/rejected": -0.7892026305198669,
      "eval_loss": 0.7948001623153687,
      "eval_rewards/accuracies": 0.53125,
      "eval_rewards/chosen": -0.5998662114143372,
      "eval_rewards/margins": 0.5839377641677856,
      "eval_rewards/rejected": -1.1838040351867676,
      "eval_runtime": 18.6098,
      "eval_samples_per_second": 27.405,
      "eval_steps_per_second": 3.439,
      "step": 250
    },
    {
      "epoch": 0.33015873015873015,
      "grad_norm": 0.8104033470153809,
      "learning_rate": 4.638410650401267e-06,
      "logits/chosen": 14.144885063171387,
      "logits/rejected": 14.350593566894531,
      "logps/chosen": -0.3469873368740082,
      "logps/rejected": -0.8288809657096863,
      "loss": 0.7939,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.5204810500144958,
      "rewards/margins": 0.7228401899337769,
      "rewards/rejected": -1.243321418762207,
      "step": 260
    },
    {
      "epoch": 0.34285714285714286,
      "grad_norm": 0.39150306582450867,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": 14.15583324432373,
      "logits/rejected": 13.783352851867676,
      "logps/chosen": -0.34353378415107727,
      "logps/rejected": -1.1232259273529053,
      "loss": 0.7635,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.5153006911277771,
      "rewards/margins": 1.1695382595062256,
      "rewards/rejected": -1.684838891029358,
      "step": 270
    },
    {
      "epoch": 0.35555555555555557,
      "grad_norm": 0.1740872859954834,
      "learning_rate": 4.582303101775249e-06,
      "logits/chosen": 13.705289840698242,
      "logits/rejected": 13.493337631225586,
      "logps/chosen": -0.35998308658599854,
      "logps/rejected": -1.3943986892700195,
      "loss": 0.7678,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.539974570274353,
      "rewards/margins": 1.5516235828399658,
      "rewards/rejected": -2.0915980339050293,
      "step": 280
    },
    {
      "epoch": 0.3682539682539683,
      "grad_norm": 0.22531260550022125,
      "learning_rate": 4.55287302283426e-06,
      "logits/chosen": 13.186914443969727,
      "logits/rejected": 13.140413284301758,
      "logps/chosen": -0.3548193573951721,
      "logps/rejected": -1.2321991920471191,
      "loss": 0.7719,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.5322290062904358,
      "rewards/margins": 1.3160697221755981,
      "rewards/rejected": -1.8482987880706787,
      "step": 290
    },
    {
      "epoch": 0.38095238095238093,
      "grad_norm": 0.5229180455207825,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": 13.780011177062988,
      "logits/rejected": 13.604715347290039,
      "logps/chosen": -0.4277075231075287,
      "logps/rejected": -1.415838599205017,
      "loss": 0.7644,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.6415613293647766,
      "rewards/margins": 1.482196569442749,
      "rewards/rejected": -2.123757839202881,
      "step": 300
    },
    {
      "epoch": 0.38095238095238093,
      "eval_logits/chosen": 13.13498592376709,
      "eval_logits/rejected": 13.13513469696045,
      "eval_logps/chosen": -0.476482629776001,
      "eval_logps/rejected": -1.4701811075210571,
      "eval_loss": 0.7514793872833252,
      "eval_rewards/accuracies": 0.546875,
      "eval_rewards/chosen": -0.7147239446640015,
      "eval_rewards/margins": 1.4905478954315186,
      "eval_rewards/rejected": -2.2052717208862305,
      "eval_runtime": 18.6093,
      "eval_samples_per_second": 27.406,
      "eval_steps_per_second": 3.439,
      "step": 300
    },
    {
      "epoch": 0.39365079365079364,
      "grad_norm": 0.7556002736091614,
      "learning_rate": 4.491324795060491e-06,
      "logits/chosen": 13.26073932647705,
      "logits/rejected": 13.28388786315918,
      "logps/chosen": -0.39378833770751953,
      "logps/rejected": -1.4242979288101196,
      "loss": 0.7092,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.5906823873519897,
      "rewards/margins": 1.5457642078399658,
      "rewards/rejected": -2.136446714401245,
      "step": 310
    },
    {
      "epoch": 0.40634920634920635,
      "grad_norm": 0.8167753219604492,
      "learning_rate": 4.4592336433146e-06,
      "logits/chosen": 12.952977180480957,
      "logits/rejected": 12.89118480682373,
      "logps/chosen": -0.4167153835296631,
      "logps/rejected": -1.1332799196243286,
      "loss": 0.7364,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.6250730752944946,
      "rewards/margins": 1.074846625328064,
      "rewards/rejected": -1.6999199390411377,
      "step": 320
    },
    {
      "epoch": 0.41904761904761906,
      "grad_norm": 2.9806692600250244,
      "learning_rate": 4.426283106939474e-06,
      "logits/chosen": 12.890368461608887,
      "logits/rejected": 12.765925407409668,
      "logps/chosen": -0.5025959014892578,
      "logps/rejected": -1.2589428424835205,
      "loss": 0.748,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.7538937926292419,
      "rewards/margins": 1.1345205307006836,
      "rewards/rejected": -1.8884143829345703,
      "step": 330
    },
    {
      "epoch": 0.43174603174603177,
      "grad_norm": 1.568097710609436,
      "learning_rate": 4.3924876391293915e-06,
      "logits/chosen": 13.032608032226562,
      "logits/rejected": 12.877195358276367,
      "logps/chosen": -0.5034081935882568,
      "logps/rejected": -1.585137963294983,
      "loss": 0.7516,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.7551122903823853,
      "rewards/margins": 1.6225944757461548,
      "rewards/rejected": -2.37770676612854,
      "step": 340
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 0.5996735095977783,
      "learning_rate": 4.357862063693486e-06,
      "logits/chosen": 12.859817504882812,
      "logits/rejected": 12.712678909301758,
      "logps/chosen": -0.5043476819992065,
      "logps/rejected": -1.525444507598877,
      "loss": 0.7497,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.7565216422080994,
      "rewards/margins": 1.5316450595855713,
      "rewards/rejected": -2.2881667613983154,
      "step": 350
    },
    {
      "epoch": 0.4444444444444444,
      "eval_logits/chosen": 12.228137969970703,
      "eval_logits/rejected": 12.226001739501953,
      "eval_logps/chosen": -0.5828607082366943,
      "eval_logps/rejected": -1.6681612730026245,
      "eval_loss": 0.7238383889198303,
      "eval_rewards/accuracies": 0.6875,
      "eval_rewards/chosen": -0.8742910623550415,
      "eval_rewards/margins": 1.6279507875442505,
      "eval_rewards/rejected": -2.502241611480713,
      "eval_runtime": 18.6038,
      "eval_samples_per_second": 27.414,
      "eval_steps_per_second": 3.44,
      "step": 350
    },
    {
      "epoch": 0.45714285714285713,
      "grad_norm": 0.605993926525116,
      "learning_rate": 4.322421568553529e-06,
      "logits/chosen": 11.993739128112793,
      "logits/rejected": 11.75650691986084,
      "logps/chosen": -0.5674928426742554,
      "logps/rejected": -1.7509374618530273,
      "loss": 0.7241,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.8512393236160278,
      "rewards/margins": 1.7751665115356445,
      "rewards/rejected": -2.626405954360962,
      "step": 360
    },
    {
      "epoch": 0.46984126984126984,
      "grad_norm": 0.931057870388031,
      "learning_rate": 4.286181699082008e-06,
      "logits/chosen": 11.784490585327148,
      "logits/rejected": 12.052295684814453,
      "logps/chosen": -0.5866945385932922,
      "logps/rejected": -1.8955312967300415,
      "loss": 0.7141,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.8800417184829712,
      "rewards/margins": 1.9632551670074463,
      "rewards/rejected": -2.843297243118286,
      "step": 370
    },
    {
      "epoch": 0.48253968253968255,
      "grad_norm": 1.3936405181884766,
      "learning_rate": 4.249158351283414e-06,
      "logits/chosen": 11.842119216918945,
      "logits/rejected": 11.340182304382324,
      "logps/chosen": -0.7804869413375854,
      "logps/rejected": -1.8759396076202393,
      "loss": 0.6654,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -1.1707303524017334,
      "rewards/margins": 1.6431787014007568,
      "rewards/rejected": -2.8139090538024902,
      "step": 380
    },
    {
      "epoch": 0.49523809523809526,
      "grad_norm": 1.737855076789856,
      "learning_rate": 4.211367764821722e-06,
      "logits/chosen": 11.465726852416992,
      "logits/rejected": 11.05290699005127,
      "logps/chosen": -1.3201282024383545,
      "logps/rejected": -2.3962795734405518,
      "loss": 0.6301,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -1.9801921844482422,
      "rewards/margins": 1.6142269372940063,
      "rewards/rejected": -3.594419002532959,
      "step": 390
    },
    {
      "epoch": 0.5079365079365079,
      "grad_norm": 1.6870065927505493,
      "learning_rate": 4.172826515897146e-06,
      "logits/chosen": 10.830609321594238,
      "logits/rejected": 10.663077354431152,
      "logps/chosen": -2.5642189979553223,
      "logps/rejected": -3.535013198852539,
      "loss": 0.6274,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -3.8463282585144043,
      "rewards/margins": 1.4561914205551147,
      "rewards/rejected": -5.302519798278809,
      "step": 400
    },
    {
      "epoch": 0.5079365079365079,
      "eval_logits/chosen": 9.964677810668945,
      "eval_logits/rejected": 9.858954429626465,
      "eval_logps/chosen": -2.578787088394165,
      "eval_logps/rejected": -3.803541898727417,
      "eval_loss": 0.590033233165741,
      "eval_rewards/accuracies": 0.796875,
      "eval_rewards/chosen": -3.868180513381958,
      "eval_rewards/margins": 1.837132453918457,
      "eval_rewards/rejected": -5.705312728881836,
      "eval_runtime": 18.5898,
      "eval_samples_per_second": 27.434,
      "eval_steps_per_second": 3.443,
      "step": 400
    },
    {
      "epoch": 0.5206349206349207,
      "grad_norm": 2.0417189598083496,
      "learning_rate": 4.133551509975264e-06,
      "logits/chosen": 9.34677505493164,
      "logits/rejected": 9.576300621032715,
      "logps/chosen": -2.1631455421447754,
      "logps/rejected": -3.024636745452881,
      "loss": 0.6309,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -3.244718074798584,
      "rewards/margins": 1.2922370433807373,
      "rewards/rejected": -4.536954879760742,
      "step": 410
    },
    {
      "epoch": 0.5333333333333333,
      "grad_norm": 2.9144859313964844,
      "learning_rate": 4.093559974371725e-06,
      "logits/chosen": 9.20117473602295,
      "logits/rejected": 9.481060028076172,
      "logps/chosen": -2.7644081115722656,
      "logps/rejected": -3.905733585357666,
      "loss": 0.5839,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -4.14661169052124,
      "rewards/margins": 1.7119888067245483,
      "rewards/rejected": -5.858600616455078,
      "step": 420
    },
    {
      "epoch": 0.546031746031746,
      "grad_norm": 2.4795055389404297,
      "learning_rate": 4.052869450695776e-06,
      "logits/chosen": 9.674077987670898,
      "logits/rejected": 9.64409065246582,
      "logps/chosen": -2.562514066696167,
      "logps/rejected": -3.8458714485168457,
      "loss": 0.5266,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -3.843771457672119,
      "rewards/margins": 1.9250361919403076,
      "rewards/rejected": -5.768807411193848,
      "step": 430
    },
    {
      "epoch": 0.5587301587301587,
      "grad_norm": 1.897057056427002,
      "learning_rate": 4.011497787155938e-06,
      "logits/chosen": 8.887510299682617,
      "logits/rejected": 8.687074661254883,
      "logps/chosen": -3.2801125049591064,
      "logps/rejected": -4.673043251037598,
      "loss": 0.5665,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -4.920168876647949,
      "rewards/margins": 2.089395523071289,
      "rewards/rejected": -7.009564399719238,
      "step": 440
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 2.1004762649536133,
      "learning_rate": 3.969463130731183e-06,
      "logits/chosen": 8.606618881225586,
      "logits/rejected": 8.299476623535156,
      "logps/chosen": -3.3771705627441406,
      "logps/rejected": -4.999676704406738,
      "loss": 0.5118,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -5.0657548904418945,
      "rewards/margins": 2.433760166168213,
      "rewards/rejected": -7.499515533447266,
      "step": 450
    },
    {
      "epoch": 0.5714285714285714,
      "eval_logits/chosen": 8.417736053466797,
      "eval_logits/rejected": 8.183667182922363,
      "eval_logps/chosen": -3.3982253074645996,
      "eval_logps/rejected": -5.010103225708008,
      "eval_loss": 0.5201926827430725,
      "eval_rewards/accuracies": 0.796875,
      "eval_rewards/chosen": -5.0973381996154785,
      "eval_rewards/margins": 2.4178173542022705,
      "eval_rewards/rejected": -7.515154838562012,
      "eval_runtime": 18.6069,
      "eval_samples_per_second": 27.409,
      "eval_steps_per_second": 3.44,
      "step": 450
    },
    {
      "epoch": 0.5841269841269842,
      "grad_norm": 2.016348123550415,
      "learning_rate": 3.92678391921108e-06,
      "logits/chosen": 9.348031997680664,
      "logits/rejected": 8.707467079162598,
      "logps/chosen": -3.687103271484375,
      "logps/rejected": -5.414787769317627,
      "loss": 0.4783,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -5.5306549072265625,
      "rewards/margins": 2.591526508331299,
      "rewards/rejected": -8.12218189239502,
      "step": 460
    },
    {
      "epoch": 0.5968253968253968,
      "grad_norm": 2.9189071655273438,
      "learning_rate": 3.88347887310836e-06,
      "logits/chosen": 8.294754028320312,
      "logits/rejected": 7.891358852386475,
      "logps/chosen": -3.737588405609131,
      "logps/rejected": -5.187026023864746,
      "loss": 0.534,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -5.606382846832275,
      "rewards/margins": 2.1741559505462646,
      "rewards/rejected": -7.780538082122803,
      "step": 470
    },
    {
      "epoch": 0.6095238095238096,
      "grad_norm": 2.306356906890869,
      "learning_rate": 3.839566987447492e-06,
      "logits/chosen": 8.484542846679688,
      "logits/rejected": 8.39714241027832,
      "logps/chosen": -3.494408369064331,
      "logps/rejected": -5.29335880279541,
      "loss": 0.4829,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -5.241612911224365,
      "rewards/margins": 2.6984262466430664,
      "rewards/rejected": -7.940038204193115,
      "step": 480
    },
    {
      "epoch": 0.6222222222222222,
      "grad_norm": 2.2581255435943604,
      "learning_rate": 3.795067523432826e-06,
      "logits/chosen": 8.763944625854492,
      "logits/rejected": 8.361797332763672,
      "logps/chosen": -3.9699864387512207,
      "logps/rejected": -6.275031566619873,
      "loss": 0.4437,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -5.954979419708252,
      "rewards/margins": 3.4575679302215576,
      "rewards/rejected": -9.412548065185547,
      "step": 490
    },
    {
      "epoch": 0.6349206349206349,
      "grad_norm": 2.2695906162261963,
      "learning_rate": 3.7500000000000005e-06,
      "logits/chosen": 7.8348212242126465,
      "logits/rejected": 7.482022285461426,
      "logps/chosen": -3.7032783031463623,
      "logps/rejected": -5.9005255699157715,
      "loss": 0.4411,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -5.554916858673096,
      "rewards/margins": 3.2958710193634033,
      "rewards/rejected": -8.850788116455078,
      "step": 500
    },
    {
      "epoch": 0.6349206349206349,
      "eval_logits/chosen": 7.651264190673828,
      "eval_logits/rejected": 7.331784248352051,
      "eval_logps/chosen": -3.6274948120117188,
      "eval_logps/rejected": -5.359984874725342,
      "eval_loss": 0.4819534122943878,
      "eval_rewards/accuracies": 0.796875,
      "eval_rewards/chosen": -5.441242694854736,
      "eval_rewards/margins": 2.5987353324890137,
      "eval_rewards/rejected": -8.03997802734375,
      "eval_runtime": 18.6061,
      "eval_samples_per_second": 27.41,
      "eval_steps_per_second": 3.44,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.212273614720598e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}