{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.983957219251337,
"eval_steps": 50,
"global_step": 279,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.053475935828877004,
"grad_norm": 75.08688576643534,
"learning_rate": 5e-07,
"logits/chosen": -2.740462064743042,
"logits/rejected": -2.7282073497772217,
"logps/chosen": -257.7225646972656,
"logps/rejected": -215.97402954101562,
"loss": 0.6903,
"rewards/accuracies": 0.3125,
"rewards/chosen": 0.011177328415215015,
"rewards/margins": 0.0018132120603695512,
"rewards/rejected": 0.009364116936922073,
"step": 5
},
{
"epoch": 0.10695187165775401,
"grad_norm": 55.24172880066687,
"learning_rate": 1e-06,
"logits/chosen": -2.706225872039795,
"logits/rejected": -2.695012092590332,
"logps/chosen": -241.26431274414062,
"logps/rejected": -213.05960083007812,
"loss": 0.6648,
"rewards/accuracies": 0.668749988079071,
"rewards/chosen": 0.5042427182197571,
"rewards/margins": 0.1558745801448822,
"rewards/rejected": 0.3483680784702301,
"step": 10
},
{
"epoch": 0.16042780748663102,
"grad_norm": 55.35417452063743,
"learning_rate": 9.991477798614637e-07,
"logits/chosen": -2.567713737487793,
"logits/rejected": -2.5728023052215576,
"logps/chosen": -252.4267578125,
"logps/rejected": -216.68722534179688,
"loss": 0.6277,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": 1.2536556720733643,
"rewards/margins": 0.6526123881340027,
"rewards/rejected": 0.6010432839393616,
"step": 15
},
{
"epoch": 0.21390374331550802,
"grad_norm": 41.54015316305311,
"learning_rate": 9.965940245625131e-07,
"logits/chosen": -2.49863862991333,
"logits/rejected": -2.4633045196533203,
"logps/chosen": -239.6151580810547,
"logps/rejected": -216.32748413085938,
"loss": 0.5937,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": 1.429532766342163,
"rewards/margins": 1.0765190124511719,
"rewards/rejected": 0.3530138432979584,
"step": 20
},
{
"epoch": 0.26737967914438504,
"grad_norm": 43.266733978086314,
"learning_rate": 9.923474395499264e-07,
"logits/chosen": -2.3930270671844482,
"logits/rejected": -2.391124725341797,
"logps/chosen": -231.81448364257812,
"logps/rejected": -213.6909942626953,
"loss": 0.5968,
"rewards/accuracies": 0.6875,
"rewards/chosen": 1.252290964126587,
"rewards/margins": 0.7059992551803589,
"rewards/rejected": 0.5462917685508728,
"step": 25
},
{
"epoch": 0.32085561497326204,
"grad_norm": 49.04579174218152,
"learning_rate": 9.86422500924775e-07,
"logits/chosen": -2.431389093399048,
"logits/rejected": -2.420288562774658,
"logps/chosen": -233.90640258789062,
"logps/rejected": -232.1009521484375,
"loss": 0.591,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": 1.3160102367401123,
"rewards/margins": 1.2142870426177979,
"rewards/rejected": 0.10172319412231445,
"step": 30
},
{
"epoch": 0.37433155080213903,
"grad_norm": 45.76490813889267,
"learning_rate": 9.788394060951227e-07,
"logits/chosen": -2.5551607608795166,
"logits/rejected": -2.560732126235962,
"logps/chosen": -243.9866180419922,
"logps/rejected": -210.0946044921875,
"loss": 0.5951,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": 0.9597422480583191,
"rewards/margins": 0.7127350568771362,
"rewards/rejected": 0.2470073401927948,
"step": 35
},
{
"epoch": 0.42780748663101603,
"grad_norm": 35.943159004433745,
"learning_rate": 9.696240049254742e-07,
"logits/chosen": -2.607058048248291,
"logits/rejected": -2.6059868335723877,
"logps/chosen": -245.34237670898438,
"logps/rejected": -215.133056640625,
"loss": 0.5946,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": 0.8869549036026001,
"rewards/margins": 1.1748888492584229,
"rewards/rejected": -0.2879341244697571,
"step": 40
},
{
"epoch": 0.48128342245989303,
"grad_norm": 43.25794768968881,
"learning_rate": 9.588077116176756e-07,
"logits/chosen": -2.5988757610321045,
"logits/rejected": -2.587991952896118,
"logps/chosen": -279.42425537109375,
"logps/rejected": -214.3015594482422,
"loss": 0.6027,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": 1.1399929523468018,
"rewards/margins": 1.116516351699829,
"rewards/rejected": 0.023476576432585716,
"step": 45
},
{
"epoch": 0.5347593582887701,
"grad_norm": 42.38890964644049,
"learning_rate": 9.464273976236516e-07,
"logits/chosen": -2.5599303245544434,
"logits/rejected": -2.567230701446533,
"logps/chosen": -254.52859497070312,
"logps/rejected": -213.7172088623047,
"loss": 0.6034,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": 1.0623753070831299,
"rewards/margins": 1.170297622680664,
"rewards/rejected": -0.10792229324579239,
"step": 50
},
{
"epoch": 0.5347593582887701,
"eval_logits/chosen": -2.5372207164764404,
"eval_logits/rejected": -2.5304667949676514,
"eval_logps/chosen": -232.14488220214844,
"eval_logps/rejected": -213.0785675048828,
"eval_loss": 0.5868557095527649,
"eval_rewards/accuracies": 0.7291666865348816,
"eval_rewards/chosen": 0.7400359511375427,
"eval_rewards/margins": 0.881316602230072,
"eval_rewards/rejected": -0.14128059148788452,
"eval_runtime": 178.1115,
"eval_samples_per_second": 14.934,
"eval_steps_per_second": 0.236,
"step": 50
},
{
"epoch": 0.5882352941176471,
"grad_norm": 35.35460256124788,
"learning_rate": 9.325252659550308e-07,
"logits/chosen": -2.5456783771514893,
"logits/rejected": -2.5366146564483643,
"logps/chosen": -250.4248504638672,
"logps/rejected": -213.6925048828125,
"loss": 0.5728,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.9326599836349487,
"rewards/margins": 1.1307684183120728,
"rewards/rejected": -0.19810837507247925,
"step": 55
},
{
"epoch": 0.6417112299465241,
"grad_norm": 36.50601945986719,
"learning_rate": 9.171487073181197e-07,
"logits/chosen": -2.497873544692993,
"logits/rejected": -2.4702892303466797,
"logps/chosen": -253.4021453857422,
"logps/rejected": -218.5993194580078,
"loss": 0.5838,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": 0.9917839169502258,
"rewards/margins": 1.002171516418457,
"rewards/rejected": -0.010387664660811424,
"step": 60
},
{
"epoch": 0.6951871657754011,
"grad_norm": 38.76671127630058,
"learning_rate": 9.003501385646448e-07,
"logits/chosen": -2.4642693996429443,
"logits/rejected": -2.444017171859741,
"logps/chosen": -231.67507934570312,
"logps/rejected": -207.4264678955078,
"loss": 0.5857,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": 0.6439246535301208,
"rewards/margins": 1.2459630966186523,
"rewards/rejected": -0.6020383834838867,
"step": 65
},
{
"epoch": 0.7486631016042781,
"grad_norm": 58.07607723312624,
"learning_rate": 8.821868240089676e-07,
"logits/chosen": -2.4684462547302246,
"logits/rejected": -2.4463274478912354,
"logps/chosen": -249.26455688476562,
"logps/rejected": -222.1261749267578,
"loss": 0.587,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": 0.7888789772987366,
"rewards/margins": 1.0215551853179932,
"rewards/rejected": -0.23267626762390137,
"step": 70
},
{
"epoch": 0.8021390374331551,
"grad_norm": 36.09293428060366,
"learning_rate": 8.62720680220876e-07,
"logits/chosen": -2.501340866088867,
"logits/rejected": -2.4918720722198486,
"logps/chosen": -225.94638061523438,
"logps/rejected": -218.7044219970703,
"loss": 0.5926,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": 0.8816758394241333,
"rewards/margins": 0.8755629658699036,
"rewards/rejected": 0.006112849805504084,
"step": 75
},
{
"epoch": 0.8556149732620321,
"grad_norm": 35.74459897866162,
"learning_rate": 8.420180649593929e-07,
"logits/chosen": -2.487574815750122,
"logits/rejected": -2.478224277496338,
"logps/chosen": -232.3713836669922,
"logps/rejected": -208.5014190673828,
"loss": 0.5303,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.8995177149772644,
"rewards/margins": 1.0680756568908691,
"rewards/rejected": -0.16855797171592712,
"step": 80
},
{
"epoch": 0.9090909090909091,
"grad_norm": 34.953324196432796,
"learning_rate": 8.201495509671036e-07,
"logits/chosen": -2.4268527030944824,
"logits/rejected": -2.394615888595581,
"logps/chosen": -247.49609375,
"logps/rejected": -214.3662567138672,
"loss": 0.5561,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": 0.6183642148971558,
"rewards/margins": 1.220983862876892,
"rewards/rejected": -0.6026195883750916,
"step": 85
},
{
"epoch": 0.9625668449197861,
"grad_norm": 31.39573691027677,
"learning_rate": 7.971896853961042e-07,
"logits/chosen": -2.2767372131347656,
"logits/rejected": -2.252058506011963,
"logps/chosen": -261.1527099609375,
"logps/rejected": -220.43667602539062,
"loss": 0.4894,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 0.6145283579826355,
"rewards/margins": 2.0016326904296875,
"rewards/rejected": -1.3871045112609863,
"step": 90
},
{
"epoch": 1.0160427807486632,
"grad_norm": 17.75039945985475,
"learning_rate": 7.732167356856654e-07,
"logits/chosen": -2.166287660598755,
"logits/rejected": -2.1111903190612793,
"logps/chosen": -252.02944946289062,
"logps/rejected": -232.24282836914062,
"loss": 0.4711,
"rewards/accuracies": 0.831250011920929,
"rewards/chosen": 0.22497756779193878,
"rewards/margins": 1.9439351558685303,
"rewards/rejected": -1.7189576625823975,
"step": 95
},
{
"epoch": 1.0695187165775402,
"grad_norm": 23.02216164814203,
"learning_rate": 7.48312422757881e-07,
"logits/chosen": -2.144843339920044,
"logits/rejected": -2.077284336090088,
"logps/chosen": -268.06707763671875,
"logps/rejected": -234.98910522460938,
"loss": 0.2659,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 1.0067119598388672,
"rewards/margins": 2.941673994064331,
"rewards/rejected": -1.9349620342254639,
"step": 100
},
{
"epoch": 1.0695187165775402,
"eval_logits/chosen": -2.1482443809509277,
"eval_logits/rejected": -2.1145682334899902,
"eval_logps/chosen": -228.6596221923828,
"eval_logps/rejected": -215.86151123046875,
"eval_loss": 0.5500321388244629,
"eval_rewards/accuracies": 0.7678571343421936,
"eval_rewards/chosen": 1.0885626077651978,
"eval_rewards/margins": 1.508140206336975,
"eval_rewards/rejected": -0.419577419757843,
"eval_runtime": 178.0144,
"eval_samples_per_second": 14.943,
"eval_steps_per_second": 0.236,
"step": 100
},
{
"epoch": 1.1229946524064172,
"grad_norm": 16.09113711061208,
"learning_rate": 7.225616424408044e-07,
"logits/chosen": -2.157158374786377,
"logits/rejected": -2.128385066986084,
"logps/chosen": -237.67849731445312,
"logps/rejected": -231.83877563476562,
"loss": 0.2083,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 2.0537843704223633,
"rewards/margins": 3.18843412399292,
"rewards/rejected": -1.134649634361267,
"step": 105
},
{
"epoch": 1.1764705882352942,
"grad_norm": 17.181507055460564,
"learning_rate": 6.96052176068713e-07,
"logits/chosen": -2.2130496501922607,
"logits/rejected": -2.1648192405700684,
"logps/chosen": -225.992431640625,
"logps/rejected": -225.93643188476562,
"loss": 0.2286,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 1.768799066543579,
"rewards/margins": 2.9698195457458496,
"rewards/rejected": -1.2010207176208496,
"step": 110
},
{
"epoch": 1.2299465240641712,
"grad_norm": 23.325905689437562,
"learning_rate": 6.688743912460229e-07,
"logits/chosen": -2.257979154586792,
"logits/rejected": -2.24532151222229,
"logps/chosen": -245.7477569580078,
"logps/rejected": -220.7982177734375,
"loss": 0.2429,
"rewards/accuracies": 0.90625,
"rewards/chosen": 1.9697185754776,
"rewards/margins": 3.129720687866211,
"rewards/rejected": -1.1600019931793213,
"step": 115
},
{
"epoch": 1.2834224598930482,
"grad_norm": 19.723516785510316,
"learning_rate": 6.411209337949213e-07,
"logits/chosen": -2.244814395904541,
"logits/rejected": -2.2148585319519043,
"logps/chosen": -234.61392211914062,
"logps/rejected": -258.7725830078125,
"loss": 0.2529,
"rewards/accuracies": 0.9375,
"rewards/chosen": 1.9211610555648804,
"rewards/margins": 4.399371147155762,
"rewards/rejected": -2.478209972381592,
"step": 120
},
{
"epoch": 1.3368983957219251,
"grad_norm": 21.739425991143058,
"learning_rate": 6.128864119368233e-07,
"logits/chosen": -2.119013547897339,
"logits/rejected": -2.083933115005493,
"logps/chosen": -228.73965454101562,
"logps/rejected": -208.091552734375,
"loss": 0.2611,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.3465216159820557,
"rewards/margins": 3.1292195320129395,
"rewards/rejected": -1.7826979160308838,
"step": 125
},
{
"epoch": 1.3903743315508021,
"grad_norm": 22.912282028009347,
"learning_rate": 5.842670737842467e-07,
"logits/chosen": -2.0183815956115723,
"logits/rejected": -1.987648367881775,
"logps/chosen": -251.6590118408203,
"logps/rejected": -229.6842803955078,
"loss": 0.2933,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": 1.9206100702285767,
"rewards/margins": 3.7730064392089844,
"rewards/rejected": -1.8523967266082764,
"step": 130
},
{
"epoch": 1.4438502673796791,
"grad_norm": 22.11760289550141,
"learning_rate": 5.553604792424922e-07,
"logits/chosen": -2.0348455905914307,
"logits/rejected": -1.9738250970840454,
"logps/chosen": -245.8058319091797,
"logps/rejected": -270.13043212890625,
"loss": 0.2246,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 1.6829955577850342,
"rewards/margins": 3.753596782684326,
"rewards/rejected": -2.070600986480713,
"step": 135
},
{
"epoch": 1.4973262032085561,
"grad_norm": 28.32892420101821,
"learning_rate": 5.262651674395798e-07,
"logits/chosen": -2.038222312927246,
"logits/rejected": -1.9750888347625732,
"logps/chosen": -240.39102172851562,
"logps/rejected": -231.63534545898438,
"loss": 0.2528,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 1.2107157707214355,
"rewards/margins": 3.3977603912353516,
"rewards/rejected": -2.187044143676758,
"step": 140
},
{
"epoch": 1.5508021390374331,
"grad_norm": 23.418588451486066,
"learning_rate": 4.970803208181314e-07,
"logits/chosen": -2.0812251567840576,
"logits/rejected": -2.0191798210144043,
"logps/chosen": -231.6563720703125,
"logps/rejected": -240.78842163085938,
"loss": 0.292,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 1.538320779800415,
"rewards/margins": 3.8071465492248535,
"rewards/rejected": -2.2688257694244385,
"step": 145
},
{
"epoch": 1.6042780748663101,
"grad_norm": 19.90761377348951,
"learning_rate": 4.679054270342702e-07,
"logits/chosen": -2.124783992767334,
"logits/rejected": -2.077249050140381,
"logps/chosen": -235.0903778076172,
"logps/rejected": -243.20028686523438,
"loss": 0.2599,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.3953964710235596,
"rewards/margins": 3.141361951828003,
"rewards/rejected": -1.7459657192230225,
"step": 150
},
{
"epoch": 1.6042780748663101,
"eval_logits/chosen": -2.1441855430603027,
"eval_logits/rejected": -2.112197160720825,
"eval_logps/chosen": -231.94912719726562,
"eval_logps/rejected": -222.50692749023438,
"eval_loss": 0.546504020690918,
"eval_rewards/accuracies": 0.7678571343421936,
"eval_rewards/chosen": 0.7596126198768616,
"eval_rewards/margins": 1.8437296152114868,
"eval_rewards/rejected": -1.0841171741485596,
"eval_runtime": 177.8643,
"eval_samples_per_second": 14.955,
"eval_steps_per_second": 0.236,
"step": 150
},
{
"epoch": 1.6577540106951871,
"grad_norm": 21.93947550108777,
"learning_rate": 4.3883993981608567e-07,
"logits/chosen": -2.181997776031494,
"logits/rejected": -2.123137950897217,
"logps/chosen": -242.07229614257812,
"logps/rejected": -237.5500030517578,
"loss": 0.2643,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 1.5722686052322388,
"rewards/margins": 3.503065824508667,
"rewards/rejected": -1.9307969808578491,
"step": 155
},
{
"epoch": 1.7112299465240641,
"grad_norm": 24.144023300435187,
"learning_rate": 4.0998293993775234e-07,
"logits/chosen": -2.226807117462158,
"logits/rejected": -2.199307680130005,
"logps/chosen": -237.28543090820312,
"logps/rejected": -238.6820831298828,
"loss": 0.2942,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 1.480787992477417,
"rewards/margins": 3.468074083328247,
"rewards/rejected": -1.987285852432251,
"step": 160
},
{
"epoch": 1.7647058823529411,
"grad_norm": 24.407740221287682,
"learning_rate": 3.814327974650066e-07,
"logits/chosen": -2.258931875228882,
"logits/rejected": -2.2550344467163086,
"logps/chosen": -239.78543090820312,
"logps/rejected": -235.02456665039062,
"loss": 0.2401,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 1.147258996963501,
"rewards/margins": 3.696079969406128,
"rewards/rejected": -2.548820972442627,
"step": 165
},
{
"epoch": 1.8181818181818183,
"grad_norm": 22.96884736732694,
"learning_rate": 3.532868364233416e-07,
"logits/chosen": -2.2801005840301514,
"logits/rejected": -2.2604260444641113,
"logps/chosen": -256.41278076171875,
"logps/rejected": -237.5870361328125,
"loss": 0.2635,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.8354307413101196,
"rewards/margins": 3.1207101345062256,
"rewards/rejected": -2.2852795124053955,
"step": 170
},
{
"epoch": 1.8716577540106951,
"grad_norm": 21.468713981869463,
"learning_rate": 3.256410030320304e-07,
"logits/chosen": -2.2765324115753174,
"logits/rejected": -2.2345592975616455,
"logps/chosen": -226.8909912109375,
"logps/rejected": -206.7959442138672,
"loss": 0.2622,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": 0.8324386477470398,
"rewards/margins": 3.093674659729004,
"rewards/rejected": -2.2612357139587402,
"step": 175
},
{
"epoch": 1.9251336898395723,
"grad_norm": 25.530423897408046,
"learning_rate": 2.985895386349233e-07,
"logits/chosen": -2.2696175575256348,
"logits/rejected": -2.244253635406494,
"logps/chosen": -264.6595458984375,
"logps/rejected": -227.76455688476562,
"loss": 0.296,
"rewards/accuracies": 0.90625,
"rewards/chosen": 1.391035795211792,
"rewards/margins": 3.3874428272247314,
"rewards/rejected": -1.99640691280365,
"step": 180
},
{
"epoch": 1.9786096256684491,
"grad_norm": 23.57510964659792,
"learning_rate": 2.7222465844296514e-07,
"logits/chosen": -2.2398269176483154,
"logits/rejected": -2.2095816135406494,
"logps/chosen": -236.76220703125,
"logps/rejected": -239.87649536132812,
"loss": 0.3228,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 1.6798350811004639,
"rewards/margins": 3.8687007427215576,
"rewards/rejected": -2.1888651847839355,
"step": 185
},
{
"epoch": 2.0320855614973263,
"grad_norm": 11.237636994206616,
"learning_rate": 2.466362371835544e-07,
"logits/chosen": -2.2077956199645996,
"logits/rejected": -2.1770200729370117,
"logps/chosen": -253.8419647216797,
"logps/rejected": -241.77560424804688,
"loss": 0.2084,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": 1.6418447494506836,
"rewards/margins": 4.010140419006348,
"rewards/rejected": -2.368295431137085,
"step": 190
},
{
"epoch": 2.085561497326203,
"grad_norm": 14.896733636093304,
"learning_rate": 2.2191150272833386e-07,
"logits/chosen": -2.168065309524536,
"logits/rejected": -2.139242649078369,
"logps/chosen": -239.03555297851562,
"logps/rejected": -262.7664794921875,
"loss": 0.1457,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.6537681818008423,
"rewards/margins": 4.138239860534668,
"rewards/rejected": -2.484471559524536,
"step": 195
},
{
"epoch": 2.1390374331550803,
"grad_norm": 15.573127323302417,
"learning_rate": 1.9813473874379395e-07,
"logits/chosen": -2.1352179050445557,
"logits/rejected": -2.1201109886169434,
"logps/chosen": -243.68276977539062,
"logps/rejected": -257.8447265625,
"loss": 0.1366,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": 1.748744249343872,
"rewards/margins": 4.428500652313232,
"rewards/rejected": -2.6797564029693604,
"step": 200
},
{
"epoch": 2.1390374331550803,
"eval_logits/chosen": -2.1154227256774902,
"eval_logits/rejected": -2.0823864936828613,
"eval_logps/chosen": -233.65536499023438,
"eval_logps/rejected": -225.89039611816406,
"eval_loss": 0.5221620202064514,
"eval_rewards/accuracies": 0.7857142686843872,
"eval_rewards/chosen": 0.5889881253242493,
"eval_rewards/margins": 2.0114519596099854,
"eval_rewards/rejected": -1.4224637746810913,
"eval_runtime": 177.8889,
"eval_samples_per_second": 14.953,
"eval_steps_per_second": 0.236,
"step": 200
},
{
"epoch": 2.192513368983957,
"grad_norm": 18.629951176458935,
"learning_rate": 1.7538699737832237e-07,
"logits/chosen": -2.113478899002075,
"logits/rejected": -2.069943904876709,
"logps/chosen": -237.72598266601562,
"logps/rejected": -261.28759765625,
"loss": 0.1322,
"rewards/accuracies": 0.9375,
"rewards/chosen": 1.763622522354126,
"rewards/margins": 4.295041084289551,
"rewards/rejected": -2.531418561935425,
"step": 205
},
{
"epoch": 2.2459893048128343,
"grad_norm": 14.516798660616674,
"learning_rate": 1.5374582296511053e-07,
"logits/chosen": -2.103269577026367,
"logits/rejected": -2.035609245300293,
"logps/chosen": -238.05905151367188,
"logps/rejected": -222.5261688232422,
"loss": 0.146,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.79998779296875,
"rewards/margins": 4.091170310974121,
"rewards/rejected": -2.29118275642395,
"step": 210
},
{
"epoch": 2.299465240641711,
"grad_norm": 17.56968018814267,
"learning_rate": 1.3328498768278418e-07,
"logits/chosen": -2.057720184326172,
"logits/rejected": -2.012430191040039,
"logps/chosen": -234.6757049560547,
"logps/rejected": -250.71371459960938,
"loss": 0.1554,
"rewards/accuracies": 0.9375,
"rewards/chosen": 1.705297827720642,
"rewards/margins": 4.06239128112793,
"rewards/rejected": -2.357093334197998,
"step": 215
},
{
"epoch": 2.3529411764705883,
"grad_norm": 12.58961314545,
"learning_rate": 1.1407424007485928e-07,
"logits/chosen": -2.0317530632019043,
"logits/rejected": -1.9697539806365967,
"logps/chosen": -247.4468536376953,
"logps/rejected": -237.97738647460938,
"loss": 0.1268,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": 2.1162328720092773,
"rewards/margins": 4.372066020965576,
"rewards/rejected": -2.2558329105377197,
"step": 220
},
{
"epoch": 2.406417112299465,
"grad_norm": 21.013389395031766,
"learning_rate": 9.617906728528679e-08,
"logits/chosen": -2.0227417945861816,
"logits/rejected": -1.967592477798462,
"logps/chosen": -222.5628662109375,
"logps/rejected": -241.11947631835938,
"loss": 0.1401,
"rewards/accuracies": 0.96875,
"rewards/chosen": 1.6054117679595947,
"rewards/margins": 3.956458330154419,
"rewards/rejected": -2.3510468006134033,
"step": 225
},
{
"epoch": 2.4598930481283423,
"grad_norm": 15.57246636151293,
"learning_rate": 7.966047182060226e-08,
"logits/chosen": -1.9939205646514893,
"logits/rejected": -1.947488784790039,
"logps/chosen": -233.0928192138672,
"logps/rejected": -235.1240692138672,
"loss": 0.1625,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.5731914043426514,
"rewards/margins": 3.795226573944092,
"rewards/rejected": -2.2220349311828613,
"step": 230
},
{
"epoch": 2.5133689839572195,
"grad_norm": 13.432171621332934,
"learning_rate": 6.457476359966684e-08,
"logits/chosen": -1.992582082748413,
"logits/rejected": -1.917047142982483,
"logps/chosen": -227.5784912109375,
"logps/rejected": -232.44485473632812,
"loss": 0.1397,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.6688787937164307,
"rewards/margins": 3.8221230506896973,
"rewards/rejected": -2.1532444953918457,
"step": 235
},
{
"epoch": 2.5668449197860963,
"grad_norm": 17.803144360723795,
"learning_rate": 5.097336799988067e-08,
"logits/chosen": -1.9870818853378296,
"logits/rejected": -1.9163825511932373,
"logps/chosen": -236.98666381835938,
"logps/rejected": -237.57760620117188,
"loss": 0.1576,
"rewards/accuracies": 0.9375,
"rewards/chosen": 1.9307695627212524,
"rewards/margins": 4.332042217254639,
"rewards/rejected": -2.4012725353240967,
"step": 240
},
{
"epoch": 2.620320855614973,
"grad_norm": 17.516307037134144,
"learning_rate": 3.8902650554212826e-08,
"logits/chosen": -1.971273422241211,
"logits/rejected": -1.9711790084838867,
"logps/chosen": -243.27542114257812,
"logps/rejected": -231.23574829101562,
"loss": 0.1618,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 2.017270565032959,
"rewards/margins": 4.455116271972656,
"rewards/rejected": -2.4378464221954346,
"step": 245
},
{
"epoch": 2.6737967914438503,
"grad_norm": 19.66674156907619,
"learning_rate": 2.8403758896638707e-08,
"logits/chosen": -1.9871242046356201,
"logits/rejected": -1.9288272857666016,
"logps/chosen": -238.3242645263672,
"logps/rejected": -238.7816162109375,
"loss": 0.1488,
"rewards/accuracies": 0.9375,
"rewards/chosen": 1.674863576889038,
"rewards/margins": 4.010067939758301,
"rewards/rejected": -2.335204839706421,
"step": 250
},
{
"epoch": 2.6737967914438503,
"eval_logits/chosen": -1.97805917263031,
"eval_logits/rejected": -1.932228446006775,
"eval_logps/chosen": -233.5652618408203,
"eval_logps/rejected": -226.9253387451172,
"eval_loss": 0.5411336421966553,
"eval_rewards/accuracies": 0.7767857313156128,
"eval_rewards/chosen": 0.5979987382888794,
"eval_rewards/margins": 2.1239590644836426,
"eval_rewards/rejected": -1.5259599685668945,
"eval_runtime": 178.7366,
"eval_samples_per_second": 14.882,
"eval_steps_per_second": 0.235,
"step": 250
},
{
"epoch": 2.7272727272727275,
"grad_norm": 20.207169205524593,
"learning_rate": 1.951248249476961e-08,
"logits/chosen": -1.9819082021713257,
"logits/rejected": -1.9226843118667603,
"logps/chosen": -231.9635009765625,
"logps/rejected": -247.98446655273438,
"loss": 0.1533,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.6299917697906494,
"rewards/margins": 4.123189449310303,
"rewards/rejected": -2.4931979179382324,
"step": 255
},
{
"epoch": 2.7807486631016043,
"grad_norm": 16.578647534749194,
"learning_rate": 1.2259130647833626e-08,
"logits/chosen": -1.9796741008758545,
"logits/rejected": -1.9408137798309326,
"logps/chosen": -235.86093139648438,
"logps/rejected": -228.4602813720703,
"loss": 0.1239,
"rewards/accuracies": 0.956250011920929,
"rewards/chosen": 1.7039449214935303,
"rewards/margins": 4.0229291915893555,
"rewards/rejected": -2.3189845085144043,
"step": 260
},
{
"epoch": 2.834224598930481,
"grad_norm": 16.969502053181888,
"learning_rate": 6.668429165893996e-09,
"logits/chosen": -1.9918749332427979,
"logits/rejected": -1.9296365976333618,
"logps/chosen": -262.6291809082031,
"logps/rejected": -232.1147918701172,
"loss": 0.1353,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.8621528148651123,
"rewards/margins": 4.288115501403809,
"rewards/rejected": -2.4259629249572754,
"step": 265
},
{
"epoch": 2.8877005347593583,
"grad_norm": 14.34594308225596,
"learning_rate": 2.759436082516664e-09,
"logits/chosen": -1.991707444190979,
"logits/rejected": -1.9247620105743408,
"logps/chosen": -240.2772674560547,
"logps/rejected": -234.63717651367188,
"loss": 0.1532,
"rewards/accuracies": 0.9375,
"rewards/chosen": 1.6390937566757202,
"rewards/margins": 4.325857162475586,
"rewards/rejected": -2.686763286590576,
"step": 270
},
{
"epoch": 2.9411764705882355,
"grad_norm": 14.078396993164128,
"learning_rate": 5.454766882097006e-10,
"logits/chosen": -1.9781386852264404,
"logits/rejected": -1.933526635169983,
"logps/chosen": -232.15744018554688,
"logps/rejected": -263.4436340332031,
"loss": 0.1405,
"rewards/accuracies": 0.90625,
"rewards/chosen": 1.8876819610595703,
"rewards/margins": 4.5024919509887695,
"rewards/rejected": -2.6148104667663574,
"step": 275
},
{
"epoch": 2.983957219251337,
"step": 279,
"total_flos": 3289753017384960.0,
"train_loss": 0.3334879027045329,
"train_runtime": 10390.4498,
"train_samples_per_second": 6.91,
"train_steps_per_second": 0.027
}
],
"logging_steps": 5,
"max_steps": 279,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3289753017384960.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}