{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.5330490405117271,
  "eval_steps": 50,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.010660980810234541,
      "grad_norm": 0.051327500492334366,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 14.755006790161133,
      "logits/rejected": 14.735244750976562,
      "logps/chosen": -0.29377540946006775,
      "logps/rejected": -0.30969956517219543,
      "loss": 0.952,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.44066309928894043,
      "rewards/margins": 0.023886267095804214,
      "rewards/rejected": -0.46454939246177673,
      "step": 10
    },
    {
      "epoch": 0.021321961620469083,
      "grad_norm": 0.04346882924437523,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 14.513801574707031,
      "logits/rejected": 14.946454048156738,
      "logps/chosen": -0.27995699644088745,
      "logps/rejected": -0.30138006806373596,
      "loss": 0.9726,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": -0.4199354648590088,
      "rewards/margins": 0.03213457390666008,
      "rewards/rejected": -0.45207005739212036,
      "step": 20
    },
    {
      "epoch": 0.031982942430703626,
      "grad_norm": 0.05228634551167488,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 14.266324043273926,
      "logits/rejected": 14.423965454101562,
      "logps/chosen": -0.2919609546661377,
      "logps/rejected": -0.32358455657958984,
      "loss": 0.9622,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.43794146180152893,
      "rewards/margins": 0.047435395419597626,
      "rewards/rejected": -0.48537683486938477,
      "step": 30
    },
    {
      "epoch": 0.042643923240938165,
      "grad_norm": 0.05487598106265068,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 14.965211868286133,
      "logits/rejected": 15.058088302612305,
      "logps/chosen": -0.277716726064682,
      "logps/rejected": -0.3055034577846527,
      "loss": 0.9403,
      "rewards/accuracies": 0.4000000059604645,
      "rewards/chosen": -0.4165751039981842,
      "rewards/margins": 0.04168009012937546,
      "rewards/rejected": -0.4582551419734955,
      "step": 40
    },
    {
      "epoch": 0.053304904051172705,
      "grad_norm": 0.057255037128925323,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 14.539288520812988,
      "logits/rejected": 15.174041748046875,
      "logps/chosen": -0.26362231373786926,
      "logps/rejected": -0.3325727581977844,
      "loss": 0.9588,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.3954334557056427,
      "rewards/margins": 0.10342560708522797,
      "rewards/rejected": -0.49885907769203186,
      "step": 50
    },
    {
      "epoch": 0.053304904051172705,
      "eval_logits/chosen": 14.618952751159668,
      "eval_logits/rejected": 15.176809310913086,
      "eval_logps/chosen": -0.2685677409172058,
      "eval_logps/rejected": -0.3283654451370239,
      "eval_loss": 0.9551004767417908,
      "eval_rewards/accuracies": 0.5131579041481018,
      "eval_rewards/chosen": -0.4028516113758087,
      "eval_rewards/margins": 0.08969658613204956,
      "eval_rewards/rejected": -0.4925481975078583,
      "eval_runtime": 21.4453,
      "eval_samples_per_second": 28.305,
      "eval_steps_per_second": 3.544,
      "step": 50
    },
    {
      "epoch": 0.06396588486140725,
      "grad_norm": 0.05227242782711983,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": 14.787714004516602,
      "logits/rejected": 15.379422187805176,
      "logps/chosen": -0.3143109679222107,
      "logps/rejected": -0.3425135612487793,
      "loss": 0.9636,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.4714665412902832,
      "rewards/margins": 0.042303841561079025,
      "rewards/rejected": -0.513770341873169,
      "step": 60
    },
    {
      "epoch": 0.07462686567164178,
      "grad_norm": 0.0658508762717247,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": 15.149365425109863,
      "logits/rejected": 15.115835189819336,
      "logps/chosen": -0.31501108407974243,
      "logps/rejected": -0.2854115962982178,
      "loss": 0.9677,
      "rewards/accuracies": 0.36250001192092896,
      "rewards/chosen": -0.47251659631729126,
      "rewards/margins": -0.04439922422170639,
      "rewards/rejected": -0.4281173646450043,
      "step": 70
    },
    {
      "epoch": 0.08528784648187633,
      "grad_norm": 0.06567618995904922,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": 15.393908500671387,
      "logits/rejected": 15.454248428344727,
      "logps/chosen": -0.31166282296180725,
      "logps/rejected": -0.3178747594356537,
      "loss": 0.9609,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.4674941897392273,
      "rewards/margins": 0.009317949414253235,
      "rewards/rejected": -0.47681212425231934,
      "step": 80
    },
    {
      "epoch": 0.09594882729211088,
      "grad_norm": 0.07566913962364197,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": 15.229632377624512,
      "logits/rejected": 15.477168083190918,
      "logps/chosen": -0.3294064998626709,
      "logps/rejected": -0.3528878390789032,
      "loss": 0.9587,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.49410971999168396,
      "rewards/margins": 0.03522203490138054,
      "rewards/rejected": -0.5293318033218384,
      "step": 90
    },
    {
      "epoch": 0.10660980810234541,
      "grad_norm": 0.09082464128732681,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": 14.481330871582031,
      "logits/rejected": 15.092982292175293,
      "logps/chosen": -0.2656436562538147,
      "logps/rejected": -0.33982905745506287,
      "loss": 0.9548,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.39846545457839966,
      "rewards/margins": 0.11127817630767822,
      "rewards/rejected": -0.5097435712814331,
      "step": 100
    },
    {
      "epoch": 0.10660980810234541,
      "eval_logits/chosen": 14.7100830078125,
      "eval_logits/rejected": 15.274725914001465,
      "eval_logps/chosen": -0.26462864875793457,
      "eval_logps/rejected": -0.331702321767807,
      "eval_loss": 0.947841465473175,
      "eval_rewards/accuracies": 0.5394737124443054,
      "eval_rewards/chosen": -0.39694297313690186,
      "eval_rewards/margins": 0.10061051696538925,
      "eval_rewards/rejected": -0.4975534677505493,
      "eval_runtime": 21.4421,
      "eval_samples_per_second": 28.309,
      "eval_steps_per_second": 3.544,
      "step": 100
    },
    {
      "epoch": 0.11727078891257996,
      "grad_norm": 0.20198923349380493,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": 14.932653427124023,
      "logits/rejected": 15.476409912109375,
      "logps/chosen": -0.27830976247787476,
      "logps/rejected": -0.34150317311286926,
      "loss": 0.9487,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.41746464371681213,
      "rewards/margins": 0.09479012340307236,
      "rewards/rejected": -0.5122548341751099,
      "step": 110
    },
    {
      "epoch": 0.1279317697228145,
      "grad_norm": 0.31938356161117554,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": 15.280967712402344,
      "logits/rejected": 15.5416259765625,
      "logps/chosen": -0.2816022038459778,
      "logps/rejected": -0.3262938857078552,
      "loss": 0.9483,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.4224032461643219,
      "rewards/margins": 0.06703753769397736,
      "rewards/rejected": -0.48944082856178284,
      "step": 120
    },
    {
      "epoch": 0.13859275053304904,
      "grad_norm": 0.12567812204360962,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": 14.967382431030273,
      "logits/rejected": 15.351877212524414,
      "logps/chosen": -0.3148510456085205,
      "logps/rejected": -0.3488944172859192,
      "loss": 0.957,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.47227659821510315,
      "rewards/margins": 0.05106503888964653,
      "rewards/rejected": -0.5233416557312012,
      "step": 130
    },
    {
      "epoch": 0.14925373134328357,
      "grad_norm": 0.09151162207126617,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": 14.900466918945312,
      "logits/rejected": 15.075350761413574,
      "logps/chosen": -0.2766302227973938,
      "logps/rejected": -0.312236487865448,
      "loss": 0.9373,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.4149452745914459,
      "rewards/margins": 0.05340944975614548,
      "rewards/rejected": -0.4683547616004944,
      "step": 140
    },
    {
      "epoch": 0.15991471215351813,
      "grad_norm": 0.1259378045797348,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": 14.528109550476074,
      "logits/rejected": 14.861102104187012,
      "logps/chosen": -0.2683579921722412,
      "logps/rejected": -0.33838269114494324,
      "loss": 0.9388,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.40253695845603943,
      "rewards/margins": 0.10503707826137543,
      "rewards/rejected": -0.5075740218162537,
      "step": 150
    },
    {
      "epoch": 0.15991471215351813,
      "eval_logits/chosen": 14.12246036529541,
      "eval_logits/rejected": 14.733266830444336,
      "eval_logps/chosen": -0.2611957788467407,
      "eval_logps/rejected": -0.3492279350757599,
      "eval_loss": 0.9302574396133423,
      "eval_rewards/accuracies": 0.5657894611358643,
      "eval_rewards/chosen": -0.3917936384677887,
      "eval_rewards/margins": 0.13204820454120636,
      "eval_rewards/rejected": -0.5238418579101562,
      "eval_runtime": 21.4406,
      "eval_samples_per_second": 28.311,
      "eval_steps_per_second": 3.545,
      "step": 150
    },
    {
      "epoch": 0.17057569296375266,
      "grad_norm": 0.11400051414966583,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": 14.4571533203125,
      "logits/rejected": 14.769159317016602,
      "logps/chosen": -0.31032469868659973,
      "logps/rejected": -0.34650668501853943,
      "loss": 0.9396,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.4654870927333832,
      "rewards/margins": 0.05427298694849014,
      "rewards/rejected": -0.519760012626648,
      "step": 160
    },
    {
      "epoch": 0.1812366737739872,
      "grad_norm": 0.1102401539683342,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": 13.959765434265137,
      "logits/rejected": 14.27458381652832,
      "logps/chosen": -0.2744378447532654,
      "logps/rejected": -0.35702812671661377,
      "loss": 0.9222,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.41165676712989807,
      "rewards/margins": 0.12388546764850616,
      "rewards/rejected": -0.5355421900749207,
      "step": 170
    },
    {
      "epoch": 0.19189765458422176,
      "grad_norm": 0.14721031486988068,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": 13.54602336883545,
      "logits/rejected": 14.076690673828125,
      "logps/chosen": -0.2713850140571594,
      "logps/rejected": -0.40618976950645447,
      "loss": 0.9052,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.40707746148109436,
      "rewards/margins": 0.20220720767974854,
      "rewards/rejected": -0.6092846989631653,
      "step": 180
    },
    {
      "epoch": 0.2025586353944563,
      "grad_norm": 0.1756824553012848,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": 12.6314697265625,
      "logits/rejected": 13.246849060058594,
      "logps/chosen": -0.27216213941574097,
      "logps/rejected": -0.4351380467414856,
      "loss": 0.8996,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.40824323892593384,
      "rewards/margins": 0.24446387588977814,
      "rewards/rejected": -0.652707040309906,
      "step": 190
    },
    {
      "epoch": 0.21321961620469082,
      "grad_norm": 0.15476027131080627,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": 12.40199089050293,
      "logits/rejected": 12.966108322143555,
      "logps/chosen": -0.303610622882843,
      "logps/rejected": -0.423031747341156,
      "loss": 0.9015,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.4554159641265869,
      "rewards/margins": 0.17913168668746948,
      "rewards/rejected": -0.6345476508140564,
      "step": 200
    },
    {
      "epoch": 0.21321961620469082,
      "eval_logits/chosen": 11.887229919433594,
      "eval_logits/rejected": 12.5900239944458,
      "eval_logps/chosen": -0.269090861082077,
      "eval_logps/rejected": -0.42408913373947144,
      "eval_loss": 0.8796805143356323,
      "eval_rewards/accuracies": 0.6447368264198303,
      "eval_rewards/chosen": -0.40363630652427673,
      "eval_rewards/margins": 0.23249731957912445,
      "eval_rewards/rejected": -0.6361336708068848,
      "eval_runtime": 21.4455,
      "eval_samples_per_second": 28.304,
      "eval_steps_per_second": 3.544,
      "step": 200
    },
    {
      "epoch": 0.22388059701492538,
      "grad_norm": 0.18212148547172546,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": 12.375594139099121,
      "logits/rejected": 12.701678276062012,
      "logps/chosen": -0.3136894702911377,
      "logps/rejected": -0.3944609761238098,
      "loss": 0.8898,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.47053417563438416,
      "rewards/margins": 0.12115727365016937,
      "rewards/rejected": -0.5916914939880371,
      "step": 210
    },
    {
      "epoch": 0.2345415778251599,
      "grad_norm": 0.5440058708190918,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": 11.23914909362793,
      "logits/rejected": 11.926396369934082,
      "logps/chosen": -0.3077571392059326,
      "logps/rejected": -0.43772149085998535,
      "loss": 0.8806,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.4616357684135437,
      "rewards/margins": 0.19494646787643433,
      "rewards/rejected": -0.656582236289978,
      "step": 220
    },
    {
      "epoch": 0.24520255863539445,
      "grad_norm": 0.5628307461738586,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": 11.177714347839355,
      "logits/rejected": 11.534266471862793,
      "logps/chosen": -0.31991320848464966,
      "logps/rejected": -0.4394511282444,
      "loss": 0.8778,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.4798697829246521,
      "rewards/margins": 0.17930689454078674,
      "rewards/rejected": -0.6591767072677612,
      "step": 230
    },
    {
      "epoch": 0.255863539445629,
      "grad_norm": 0.40485626459121704,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": 10.132668495178223,
      "logits/rejected": 10.29063606262207,
      "logps/chosen": -0.3195653557777405,
      "logps/rejected": -0.47949132323265076,
      "loss": 0.8551,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.47934800386428833,
      "rewards/margins": 0.23988890647888184,
      "rewards/rejected": -0.7192369699478149,
      "step": 240
    },
    {
      "epoch": 0.26652452025586354,
      "grad_norm": 0.6199322938919067,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": 8.781888008117676,
      "logits/rejected": 9.237382888793945,
      "logps/chosen": -0.3370448052883148,
      "logps/rejected": -0.610824465751648,
      "loss": 0.8416,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.505567193031311,
      "rewards/margins": 0.4106695055961609,
      "rewards/rejected": -0.9162367582321167,
      "step": 250
    },
    {
      "epoch": 0.26652452025586354,
      "eval_logits/chosen": 8.437722206115723,
      "eval_logits/rejected": 8.843962669372559,
      "eval_logps/chosen": -0.3058585226535797,
      "eval_logps/rejected": -0.582990825176239,
      "eval_loss": 0.8036603331565857,
      "eval_rewards/accuracies": 0.6447368264198303,
      "eval_rewards/chosen": -0.4587877094745636,
      "eval_rewards/margins": 0.4156985878944397,
      "eval_rewards/rejected": -0.8744862079620361,
      "eval_runtime": 21.4423,
      "eval_samples_per_second": 28.308,
      "eval_steps_per_second": 3.544,
      "step": 250
    },
    {
      "epoch": 0.2771855010660981,
      "grad_norm": 0.3213505744934082,
      "learning_rate": 4.638410650401267e-06,
      "logits/chosen": 7.914826393127441,
      "logits/rejected": 8.010818481445312,
      "logps/chosen": -0.3556877374649048,
      "logps/rejected": -0.7540119886398315,
      "loss": 0.7811,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.5335315465927124,
      "rewards/margins": 0.5974863171577454,
      "rewards/rejected": -1.1310179233551025,
      "step": 260
    },
    {
      "epoch": 0.2878464818763326,
      "grad_norm": 1.0119378566741943,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": 7.584845542907715,
      "logits/rejected": 7.812608242034912,
      "logps/chosen": -0.3649575412273407,
      "logps/rejected": -0.8042632937431335,
      "loss": 0.7391,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.5474363565444946,
      "rewards/margins": 0.6589586734771729,
      "rewards/rejected": -1.206395149230957,
      "step": 270
    },
    {
      "epoch": 0.29850746268656714,
      "grad_norm": 0.5339816808700562,
      "learning_rate": 4.582303101775249e-06,
      "logits/chosen": 6.687758445739746,
      "logits/rejected": 6.233181476593018,
      "logps/chosen": -0.415935218334198,
      "logps/rejected": -1.2987438440322876,
      "loss": 0.7419,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.6239027976989746,
      "rewards/margins": 1.3242127895355225,
      "rewards/rejected": -1.9481157064437866,
      "step": 280
    },
    {
      "epoch": 0.3091684434968017,
      "grad_norm": 0.3514000475406647,
      "learning_rate": 4.55287302283426e-06,
      "logits/chosen": 6.2503981590271,
      "logits/rejected": 5.798542499542236,
      "logps/chosen": -0.4319223463535309,
      "logps/rejected": -1.2257453203201294,
      "loss": 0.7235,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.6478835344314575,
      "rewards/margins": 1.1907342672348022,
      "rewards/rejected": -1.8386180400848389,
      "step": 290
    },
    {
      "epoch": 0.31982942430703626,
      "grad_norm": 0.6761008501052856,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": 4.4480695724487305,
      "logits/rejected": 4.290585994720459,
      "logps/chosen": -0.42002564668655396,
      "logps/rejected": -1.4215493202209473,
      "loss": 0.7058,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.6300384402275085,
      "rewards/margins": 1.5022855997085571,
      "rewards/rejected": -2.132323980331421,
      "step": 300
    },
    {
      "epoch": 0.31982942430703626,
      "eval_logits/chosen": 4.789332389831543,
      "eval_logits/rejected": 4.481485366821289,
      "eval_logps/chosen": -0.4049508571624756,
      "eval_logps/rejected": -1.395646095275879,
      "eval_loss": 0.6695442199707031,
      "eval_rewards/accuracies": 0.6710526347160339,
      "eval_rewards/chosen": -0.6074262857437134,
      "eval_rewards/margins": 1.4860429763793945,
      "eval_rewards/rejected": -2.0934693813323975,
      "eval_runtime": 21.4397,
      "eval_samples_per_second": 28.312,
      "eval_steps_per_second": 3.545,
      "step": 300
    },
    {
      "epoch": 0.3304904051172708,
      "grad_norm": 0.44682690501213074,
      "learning_rate": 4.491324795060491e-06,
      "logits/chosen": 5.487166404724121,
      "logits/rejected": 4.501384258270264,
      "logps/chosen": -0.5215579867362976,
      "logps/rejected": -1.7223398685455322,
      "loss": 0.6988,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.7823370695114136,
      "rewards/margins": 1.8011726140975952,
      "rewards/rejected": -2.5835094451904297,
      "step": 310
    },
    {
      "epoch": 0.3411513859275053,
      "grad_norm": 0.41085830330848694,
      "learning_rate": 4.4592336433146e-06,
      "logits/chosen": 4.162590026855469,
      "logits/rejected": 2.876271963119507,
      "logps/chosen": -0.5402930974960327,
      "logps/rejected": -1.7925996780395508,
      "loss": 0.6811,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.8104397058486938,
      "rewards/margins": 1.8784599304199219,
      "rewards/rejected": -2.688899517059326,
      "step": 320
    },
    {
      "epoch": 0.35181236673773986,
      "grad_norm": 0.5611584186553955,
      "learning_rate": 4.426283106939474e-06,
      "logits/chosen": 4.088540077209473,
      "logits/rejected": 3.081679582595825,
      "logps/chosen": -0.541223406791687,
      "logps/rejected": -1.9464069604873657,
      "loss": 0.6614,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.8118351101875305,
      "rewards/margins": 2.1077752113342285,
      "rewards/rejected": -2.919610023498535,
      "step": 330
    },
    {
      "epoch": 0.3624733475479744,
      "grad_norm": 4.05828857421875,
      "learning_rate": 4.3924876391293915e-06,
      "logits/chosen": 3.3937134742736816,
      "logits/rejected": 2.4182538986206055,
      "logps/chosen": -0.6656067967414856,
      "logps/rejected": -1.5255868434906006,
      "loss": 0.6583,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.9984102249145508,
      "rewards/margins": 1.2899701595306396,
      "rewards/rejected": -2.2883803844451904,
      "step": 340
    },
    {
      "epoch": 0.373134328358209,
      "grad_norm": 0.8311880230903625,
      "learning_rate": 4.357862063693486e-06,
      "logits/chosen": 2.503194570541382,
      "logits/rejected": 1.5284960269927979,
      "logps/chosen": -0.6593035459518433,
      "logps/rejected": -2.211193323135376,
      "loss": 0.5911,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.9889553189277649,
      "rewards/margins": 2.3278346061706543,
      "rewards/rejected": -3.3167896270751953,
      "step": 350
    },
    {
      "epoch": 0.373134328358209,
      "eval_logits/chosen": 2.556962728500366,
      "eval_logits/rejected": 1.830418586730957,
      "eval_logps/chosen": -0.6546408534049988,
      "eval_logps/rejected": -1.9014692306518555,
      "eval_loss": 0.5961893200874329,
      "eval_rewards/accuracies": 0.6842105388641357,
      "eval_rewards/chosen": -0.9819613099098206,
      "eval_rewards/margins": 1.8702424764633179,
      "eval_rewards/rejected": -2.852203845977783,
      "eval_runtime": 21.4393,
      "eval_samples_per_second": 28.312,
      "eval_steps_per_second": 3.545,
      "step": 350
    },
    {
      "epoch": 0.3837953091684435,
      "grad_norm": 1.4237236976623535,
      "learning_rate": 4.322421568553529e-06,
      "logits/chosen": 3.0001542568206787,
      "logits/rejected": 1.9715242385864258,
      "logps/chosen": -0.8050466775894165,
      "logps/rejected": -2.2938907146453857,
      "loss": 0.58,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.20756995677948,
      "rewards/margins": 2.2332661151885986,
      "rewards/rejected": -3.440835952758789,
      "step": 360
    },
    {
      "epoch": 0.39445628997867804,
      "grad_norm": 2.2651443481445312,
      "learning_rate": 4.286181699082008e-06,
      "logits/chosen": 2.7526040077209473,
      "logits/rejected": 2.05066180229187,
      "logps/chosen": -1.6301355361938477,
      "logps/rejected": -2.9630703926086426,
      "loss": 0.5823,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -2.4452033042907715,
      "rewards/margins": 1.999402642250061,
      "rewards/rejected": -4.444605827331543,
      "step": 370
    },
    {
      "epoch": 0.4051172707889126,
      "grad_norm": 1.9120367765426636,
      "learning_rate": 4.249158351283414e-06,
      "logits/chosen": 1.9757938385009766,
      "logits/rejected": 1.5915673971176147,
      "logps/chosen": -2.063323497772217,
      "logps/rejected": -2.899749755859375,
      "loss": 0.5675,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -3.094984769821167,
      "rewards/margins": 1.2546398639678955,
      "rewards/rejected": -4.3496246337890625,
      "step": 380
    },
    {
      "epoch": 0.4157782515991471,
      "grad_norm": 3.0018720626831055,
      "learning_rate": 4.211367764821722e-06,
      "logits/chosen": 2.541440486907959,
      "logits/rejected": 1.7436832189559937,
      "logps/chosen": -2.279510736465454,
      "logps/rejected": -3.3447775840759277,
      "loss": 0.4969,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -3.4192657470703125,
      "rewards/margins": 1.5979007482528687,
      "rewards/rejected": -5.017167091369629,
      "step": 390
    },
    {
      "epoch": 0.42643923240938164,
      "grad_norm": 1.9656275510787964,
      "learning_rate": 4.172826515897146e-06,
      "logits/chosen": 1.6748476028442383,
      "logits/rejected": 1.0921740531921387,
      "logps/chosen": -2.147991180419922,
      "logps/rejected": -3.380042314529419,
      "loss": 0.5135,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -3.221986770629883,
      "rewards/margins": 1.8480768203735352,
      "rewards/rejected": -5.07006311416626,
      "step": 400
    },
    {
      "epoch": 0.42643923240938164,
      "eval_logits/chosen": 2.210231065750122,
      "eval_logits/rejected": 1.679926872253418,
      "eval_logps/chosen": -2.044506788253784,
      "eval_logps/rejected": -3.713956356048584,
      "eval_loss": 0.47455134987831116,
      "eval_rewards/accuracies": 0.9342105388641357,
      "eval_rewards/chosen": -3.0667598247528076,
      "eval_rewards/margins": 2.5041754245758057,
      "eval_rewards/rejected": -5.570935249328613,
      "eval_runtime": 21.4401,
      "eval_samples_per_second": 28.311,
      "eval_steps_per_second": 3.545,
      "step": 400
    },
    {
      "epoch": 0.43710021321961623,
      "grad_norm": 2.501361131668091,
      "learning_rate": 4.133551509975264e-06,
      "logits/chosen": 1.9820306301116943,
      "logits/rejected": 1.3992068767547607,
      "logps/chosen": -2.300197124481201,
      "logps/rejected": -3.813164472579956,
      "loss": 0.498,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -3.4502956867218018,
      "rewards/margins": 2.2694506645202637,
      "rewards/rejected": -5.7197465896606445,
      "step": 410
    },
    {
      "epoch": 0.44776119402985076,
      "grad_norm": 3.828648090362549,
      "learning_rate": 4.093559974371725e-06,
      "logits/chosen": 2.7997095584869385,
      "logits/rejected": 2.4387598037719727,
      "logps/chosen": -2.687736749649048,
      "logps/rejected": -4.425741195678711,
      "loss": 0.4494,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -4.031605243682861,
      "rewards/margins": 2.607006788253784,
      "rewards/rejected": -6.638613224029541,
      "step": 420
    },
    {
      "epoch": 0.4584221748400853,
      "grad_norm": 2.635803461074829,
      "learning_rate": 4.052869450695776e-06,
      "logits/chosen": 2.942661762237549,
      "logits/rejected": 2.019963026046753,
      "logps/chosen": -2.98117733001709,
      "logps/rejected": -4.717232704162598,
      "loss": 0.4796,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -4.471765518188477,
      "rewards/margins": 2.60408353805542,
      "rewards/rejected": -7.075850009918213,
      "step": 430
    },
    {
      "epoch": 0.4690831556503198,
      "grad_norm": 3.140829086303711,
      "learning_rate": 4.011497787155938e-06,
      "logits/chosen": 3.2747459411621094,
      "logits/rejected": 2.2958083152770996,
      "logps/chosen": -3.129321575164795,
      "logps/rejected": -4.921725273132324,
      "loss": 0.4468,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -4.69398307800293,
      "rewards/margins": 2.688605785369873,
      "rewards/rejected": -7.3825883865356445,
      "step": 440
    },
    {
      "epoch": 0.47974413646055436,
      "grad_norm": 2.7932240962982178,
      "learning_rate": 3.969463130731183e-06,
      "logits/chosen": 2.205420970916748,
      "logits/rejected": 1.4024155139923096,
      "logps/chosen": -2.7564563751220703,
      "logps/rejected": -4.563851356506348,
      "loss": 0.4073,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -4.1346845626831055,
      "rewards/margins": 2.711092472076416,
      "rewards/rejected": -6.8457770347595215,
      "step": 450
    },
    {
      "epoch": 0.47974413646055436,
      "eval_logits/chosen": 2.0136826038360596,
      "eval_logits/rejected": 1.561701774597168,
      "eval_logps/chosen": -2.7486908435821533,
      "eval_logps/rejected": -4.690793514251709,
      "eval_loss": 0.41499289870262146,
      "eval_rewards/accuracies": 0.9210526347160339,
      "eval_rewards/chosen": -4.123035907745361,
      "eval_rewards/margins": 2.913153648376465,
      "eval_rewards/rejected": -7.036189079284668,
      "eval_runtime": 21.4387,
      "eval_samples_per_second": 28.313,
      "eval_steps_per_second": 3.545,
      "step": 450
    },
    {
      "epoch": 0.4904051172707889,
      "grad_norm": 2.7059199810028076,
      "learning_rate": 3.92678391921108e-06,
      "logits/chosen": 2.257246494293213,
      "logits/rejected": 1.6654322147369385,
      "logps/chosen": -3.389554500579834,
      "logps/rejected": -5.6951165199279785,
      "loss": 0.4004,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -5.084332466125488,
      "rewards/margins": 3.4583427906036377,
      "rewards/rejected": -8.542675018310547,
      "step": 460
    },
    {
      "epoch": 0.5010660980810234,
      "grad_norm": 2.245579719543457,
      "learning_rate": 3.88347887310836e-06,
      "logits/chosen": 2.3386971950531006,
      "logits/rejected": 2.086036205291748,
      "logps/chosen": -3.2753937244415283,
      "logps/rejected": -5.4362359046936035,
      "loss": 0.3976,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -4.913090705871582,
      "rewards/margins": 3.241262912750244,
      "rewards/rejected": -8.154353141784668,
      "step": 470
    },
    {
      "epoch": 0.511727078891258,
      "grad_norm": 2.8131167888641357,
      "learning_rate": 3.839566987447492e-06,
      "logits/chosen": 1.6951004266738892,
      "logits/rejected": 1.3795586824417114,
      "logps/chosen": -3.2933483123779297,
      "logps/rejected": -5.050060749053955,
      "loss": 0.3982,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -4.940022945404053,
      "rewards/margins": 2.635068416595459,
      "rewards/rejected": -7.5750908851623535,
      "step": 480
    },
    {
      "epoch": 0.5223880597014925,
      "grad_norm": 2.6465814113616943,
      "learning_rate": 3.795067523432826e-06,
      "logits/chosen": 3.136894702911377,
      "logits/rejected": 2.6332411766052246,
      "logps/chosen": -4.419378757476807,
      "logps/rejected": -6.467301845550537,
      "loss": 0.3974,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -6.629067420959473,
      "rewards/margins": 3.071885585784912,
      "rewards/rejected": -9.700952529907227,
      "step": 490
    },
    {
      "epoch": 0.5330490405117271,
      "grad_norm": 3.6718053817749023,
      "learning_rate": 3.7500000000000005e-06,
      "logits/chosen": 1.681780457496643,
      "logits/rejected": 1.0038775205612183,
      "logps/chosen": -3.266970157623291,
      "logps/rejected": -5.594450950622559,
      "loss": 0.367,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -4.900455474853516,
      "rewards/margins": 3.4912209510803223,
      "rewards/rejected": -8.391676902770996,
      "step": 500
    },
    {
      "epoch": 0.5330490405117271,
      "eval_logits/chosen": 2.110192060470581,
      "eval_logits/rejected": 1.7233155965805054,
      "eval_logps/chosen": -3.0329930782318115,
      "eval_logps/rejected": -5.3280930519104,
      "eval_loss": 0.387028306722641,
      "eval_rewards/accuracies": 0.9210526347160339,
      "eval_rewards/chosen": -4.549489498138428,
      "eval_rewards/margins": 3.4426498413085938,
      "eval_rewards/rejected": -7.9921393394470215,
      "eval_runtime": 21.4417,
      "eval_samples_per_second": 28.309,
      "eval_steps_per_second": 3.545,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.196517093403525e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}