Llama-3-8B_length / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9958330532965927,
"eval_steps": 100,
"global_step": 464,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004301364339001277,
"grad_norm": 64.5691909790039,
"learning_rate": 1.0638297872340425e-08,
"logits/chosen": -1.1015625,
"logits/rejected": -1.109375,
"logps/chosen": -189.0,
"logps/rejected": -164.0,
"loss": 44.25,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.04301364339001277,
"grad_norm": 67.05587005615234,
"learning_rate": 1.0638297872340425e-07,
"logits/chosen": -1.0546875,
"logits/rejected": -1.0625,
"logps/chosen": -186.0,
"logps/rejected": -156.0,
"loss": 44.4067,
"rewards/accuracies": 0.2100694477558136,
"rewards/chosen": -0.0020751953125,
"rewards/margins": -0.002960205078125,
"rewards/rejected": 0.000888824462890625,
"step": 10
},
{
"epoch": 0.08602728678002554,
"grad_norm": 56.3581428527832,
"learning_rate": 2.127659574468085e-07,
"logits/chosen": -1.046875,
"logits/rejected": -1.0703125,
"logps/chosen": -180.0,
"logps/rejected": -158.0,
"loss": 44.3324,
"rewards/accuracies": 0.27031248807907104,
"rewards/chosen": -0.000431060791015625,
"rewards/margins": -0.0002727508544921875,
"rewards/rejected": -0.000156402587890625,
"step": 20
},
{
"epoch": 0.12904093017003831,
"grad_norm": 59.68115997314453,
"learning_rate": 3.1914893617021275e-07,
"logits/chosen": -1.046875,
"logits/rejected": -1.0703125,
"logps/chosen": -193.0,
"logps/rejected": -151.0,
"loss": 44.3242,
"rewards/accuracies": 0.26875001192092896,
"rewards/chosen": 0.000507354736328125,
"rewards/margins": -0.0001468658447265625,
"rewards/rejected": 0.0006561279296875,
"step": 30
},
{
"epoch": 0.17205457356005108,
"grad_norm": 58.759300231933594,
"learning_rate": 4.25531914893617e-07,
"logits/chosen": -1.0546875,
"logits/rejected": -1.078125,
"logps/chosen": -186.0,
"logps/rejected": -151.0,
"loss": 44.2062,
"rewards/accuracies": 0.2874999940395355,
"rewards/chosen": 0.0030670166015625,
"rewards/margins": 0.0029144287109375,
"rewards/rejected": 0.0001468658447265625,
"step": 40
},
{
"epoch": 0.21506821695006384,
"grad_norm": 65.82920837402344,
"learning_rate": 4.999361498869529e-07,
"logits/chosen": -1.0546875,
"logits/rejected": -1.078125,
"logps/chosen": -184.0,
"logps/rejected": -158.0,
"loss": 44.4336,
"rewards/accuracies": 0.23906250298023224,
"rewards/chosen": -0.0015869140625,
"rewards/margins": -0.003143310546875,
"rewards/rejected": 0.00156402587890625,
"step": 50
},
{
"epoch": 0.25808186034007663,
"grad_norm": 52.45038986206055,
"learning_rate": 4.988019438437758e-07,
"logits/chosen": -1.0390625,
"logits/rejected": -1.0546875,
"logps/chosen": -178.0,
"logps/rejected": -159.0,
"loss": 44.2992,
"rewards/accuracies": 0.2671875059604645,
"rewards/chosen": 0.0048828125,
"rewards/margins": 0.001068115234375,
"rewards/rejected": 0.003814697265625,
"step": 60
},
{
"epoch": 0.30109550373008936,
"grad_norm": 57.27840805053711,
"learning_rate": 4.962562537324176e-07,
"logits/chosen": -1.0390625,
"logits/rejected": -1.0703125,
"logps/chosen": -177.0,
"logps/rejected": -158.0,
"loss": 44.3105,
"rewards/accuracies": 0.2750000059604645,
"rewards/chosen": 0.004791259765625,
"rewards/margins": 0.00021648406982421875,
"rewards/rejected": 0.00457763671875,
"step": 70
},
{
"epoch": 0.34410914712010215,
"grad_norm": 63.51906204223633,
"learning_rate": 4.923135215663896e-07,
"logits/chosen": -1.03125,
"logits/rejected": -1.0625,
"logps/chosen": -179.0,
"logps/rejected": -155.0,
"loss": 44.3031,
"rewards/accuracies": 0.2874999940395355,
"rewards/chosen": 0.0072021484375,
"rewards/margins": 0.0003147125244140625,
"rewards/rejected": 0.00689697265625,
"step": 80
},
{
"epoch": 0.38712279051011494,
"grad_norm": 62.818267822265625,
"learning_rate": 4.8699611495083e-07,
"logits/chosen": -1.046875,
"logits/rejected": -1.0703125,
"logps/chosen": -181.0,
"logps/rejected": -152.0,
"loss": 44.2629,
"rewards/accuracies": 0.3046875,
"rewards/chosen": 0.0087890625,
"rewards/margins": 0.002227783203125,
"rewards/rejected": 0.006561279296875,
"step": 90
},
{
"epoch": 0.4301364339001277,
"grad_norm": 62.98960494995117,
"learning_rate": 4.803342001883246e-07,
"logits/chosen": -1.03125,
"logits/rejected": -1.0546875,
"logps/chosen": -187.0,
"logps/rejected": -165.0,
"loss": 44.2797,
"rewards/accuracies": 0.3203125,
"rewards/chosen": 0.01214599609375,
"rewards/margins": 0.00173187255859375,
"rewards/rejected": 0.0103759765625,
"step": 100
},
{
"epoch": 0.4301364339001277,
"eval_logits/chosen": -1.0546875,
"eval_logits/rejected": -1.0703125,
"eval_logps/chosen": -174.0,
"eval_logps/rejected": -163.0,
"eval_loss": 0.6920444369316101,
"eval_rewards/accuracies": 0.32935988903045654,
"eval_rewards/chosen": 0.01458740234375,
"eval_rewards/margins": 0.00140380859375,
"eval_rewards/rejected": 0.01318359375,
"eval_runtime": 2133.8068,
"eval_samples_per_second": 2.869,
"eval_steps_per_second": 0.717,
"step": 100
},
{
"epoch": 0.47315007729014047,
"grad_norm": 61.54692459106445,
"learning_rate": 4.72365571141757e-07,
"logits/chosen": -1.0546875,
"logits/rejected": -1.0859375,
"logps/chosen": -181.0,
"logps/rejected": -162.0,
"loss": 44.2184,
"rewards/accuracies": 0.3499999940395355,
"rewards/chosen": 0.015625,
"rewards/margins": 0.00396728515625,
"rewards/rejected": 0.01165771484375,
"step": 110
},
{
"epoch": 0.5161637206801533,
"grad_norm": 62.887245178222656,
"learning_rate": 4.6313543482507056e-07,
"logits/chosen": -1.0625,
"logits/rejected": -1.0625,
"logps/chosen": -176.0,
"logps/rejected": -154.0,
"loss": 44.1973,
"rewards/accuracies": 0.3921875059604645,
"rewards/chosen": 0.02099609375,
"rewards/margins": 0.0038299560546875,
"rewards/rejected": 0.0172119140625,
"step": 120
},
{
"epoch": 0.559177364070166,
"grad_norm": 94.9865951538086,
"learning_rate": 4.526961549383108e-07,
"logits/chosen": -1.0546875,
"logits/rejected": -1.046875,
"logps/chosen": -191.0,
"logps/rejected": -158.0,
"loss": 44.2,
"rewards/accuracies": 0.3656249940395355,
"rewards/chosen": 0.02490234375,
"rewards/margins": 0.004547119140625,
"rewards/rejected": 0.0203857421875,
"step": 130
},
{
"epoch": 0.6021910074601787,
"grad_norm": 62.164451599121094,
"learning_rate": 4.4110695480190597e-07,
"logits/chosen": -1.078125,
"logits/rejected": -1.0859375,
"logps/chosen": -179.0,
"logps/rejected": -153.0,
"loss": 44.1516,
"rewards/accuracies": 0.40937501192092896,
"rewards/chosen": 0.03076171875,
"rewards/margins": 0.00604248046875,
"rewards/rejected": 0.024658203125,
"step": 140
},
{
"epoch": 0.6452046508501915,
"grad_norm": 63.30311965942383,
"learning_rate": 4.284335813754769e-07,
"logits/chosen": -1.0625,
"logits/rejected": -1.078125,
"logps/chosen": -182.0,
"logps/rejected": -158.0,
"loss": 44.2184,
"rewards/accuracies": 0.37968748807907104,
"rewards/chosen": 0.03369140625,
"rewards/margins": 0.0037689208984375,
"rewards/rejected": 0.030029296875,
"step": 150
},
{
"epoch": 0.6882182942402043,
"grad_norm": 67.73347473144531,
"learning_rate": 4.1474793226723825e-07,
"logits/chosen": -1.046875,
"logits/rejected": -1.0546875,
"logps/chosen": -172.0,
"logps/rejected": -155.0,
"loss": 44.1336,
"rewards/accuracies": 0.4140625,
"rewards/chosen": 0.04052734375,
"rewards/margins": 0.006866455078125,
"rewards/rejected": 0.033447265625,
"step": 160
},
{
"epoch": 0.7312319376302171,
"grad_norm": 69.98623657226562,
"learning_rate": 4.001276478500126e-07,
"logits/chosen": -1.0546875,
"logits/rejected": -1.0703125,
"logps/chosen": -183.0,
"logps/rejected": -156.0,
"loss": 44.0789,
"rewards/accuracies": 0.42656248807907104,
"rewards/chosen": 0.044921875,
"rewards/margins": 0.00726318359375,
"rewards/rejected": 0.03759765625,
"step": 170
},
{
"epoch": 0.7742455810202299,
"grad_norm": 80.5298843383789,
"learning_rate": 3.846556707978337e-07,
"logits/chosen": -1.0390625,
"logits/rejected": -1.078125,
"logps/chosen": -175.0,
"logps/rejected": -146.0,
"loss": 43.9258,
"rewards/accuracies": 0.48124998807907104,
"rewards/chosen": 0.052734375,
"rewards/margins": 0.01385498046875,
"rewards/rejected": 0.038818359375,
"step": 180
},
{
"epoch": 0.8172592244102426,
"grad_norm": 72.71257781982422,
"learning_rate": 3.684197755419419e-07,
"logits/chosen": -1.0625,
"logits/rejected": -1.09375,
"logps/chosen": -180.0,
"logps/rejected": -157.0,
"loss": 43.9504,
"rewards/accuracies": 0.45781248807907104,
"rewards/chosen": 0.0615234375,
"rewards/margins": 0.01220703125,
"rewards/rejected": 0.04931640625,
"step": 190
},
{
"epoch": 0.8602728678002554,
"grad_norm": 70.82247924804688,
"learning_rate": 3.5151207031562633e-07,
"logits/chosen": -1.0546875,
"logits/rejected": -1.0703125,
"logps/chosen": -184.0,
"logps/rejected": -157.0,
"loss": 44.0094,
"rewards/accuracies": 0.44843751192092896,
"rewards/chosen": 0.0703125,
"rewards/margins": 0.01141357421875,
"rewards/rejected": 0.058837890625,
"step": 200
},
{
"epoch": 0.8602728678002554,
"eval_logits/chosen": -1.0625,
"eval_logits/rejected": -1.078125,
"eval_logps/chosen": -173.0,
"eval_logps/rejected": -162.0,
"eval_loss": 0.6888893246650696,
"eval_rewards/accuracies": 0.4306009113788605,
"eval_rewards/chosen": 0.0703125,
"eval_rewards/margins": 0.00848388671875,
"eval_rewards/rejected": 0.061767578125,
"eval_runtime": 2136.7935,
"eval_samples_per_second": 2.865,
"eval_steps_per_second": 0.716,
"step": 200
},
{
"epoch": 0.9032865111902681,
"grad_norm": 58.35607147216797,
"learning_rate": 3.34028474612874e-07,
"logits/chosen": -1.03125,
"logits/rejected": -1.0703125,
"logps/chosen": -184.0,
"logps/rejected": -162.0,
"loss": 43.9633,
"rewards/accuracies": 0.4625000059604645,
"rewards/chosen": 0.07666015625,
"rewards/margins": 0.01165771484375,
"rewards/rejected": 0.06494140625,
"step": 210
},
{
"epoch": 0.9463001545802809,
"grad_norm": 62.95896911621094,
"learning_rate": 3.1606817502526736e-07,
"logits/chosen": -1.0546875,
"logits/rejected": -1.0859375,
"logps/chosen": -181.0,
"logps/rejected": -158.0,
"loss": 44.0883,
"rewards/accuracies": 0.4234375059604645,
"rewards/chosen": 0.08203125,
"rewards/margins": 0.007720947265625,
"rewards/rejected": 0.07421875,
"step": 220
},
{
"epoch": 0.9893137979702937,
"grad_norm": 70.77607727050781,
"learning_rate": 2.9773306254423513e-07,
"logits/chosen": -1.0703125,
"logits/rejected": -1.078125,
"logps/chosen": -186.0,
"logps/rejected": -159.0,
"loss": 44.0246,
"rewards/accuracies": 0.4906249940395355,
"rewards/chosen": 0.0888671875,
"rewards/margins": 0.01190185546875,
"rewards/rejected": 0.0771484375,
"step": 230
},
{
"epoch": 1.0323274413603065,
"grad_norm": 68.3196792602539,
"learning_rate": 2.791271545209101e-07,
"logits/chosen": -1.0703125,
"logits/rejected": -1.0859375,
"logps/chosen": -190.0,
"logps/rejected": -163.0,
"loss": 43.8422,
"rewards/accuracies": 0.4671874940395355,
"rewards/chosen": 0.09716796875,
"rewards/margins": 0.0157470703125,
"rewards/rejected": 0.0810546875,
"step": 240
},
{
"epoch": 1.0753410847503193,
"grad_norm": 65.2646484375,
"learning_rate": 2.603560045628857e-07,
"logits/chosen": -1.0625,
"logits/rejected": -1.078125,
"logps/chosen": -172.0,
"logps/rejected": -152.0,
"loss": 43.9477,
"rewards/accuracies": 0.45625001192092896,
"rewards/chosen": 0.09521484375,
"rewards/margins": 0.01373291015625,
"rewards/rejected": 0.08154296875,
"step": 250
},
{
"epoch": 1.118354728140332,
"grad_norm": 76.79366302490234,
"learning_rate": 2.4152610371560093e-07,
"logits/chosen": -1.0546875,
"logits/rejected": -1.09375,
"logps/chosen": -172.0,
"logps/rejected": -152.0,
"loss": 43.9469,
"rewards/accuracies": 0.47968751192092896,
"rewards/chosen": 0.10107421875,
"rewards/margins": 0.01409912109375,
"rewards/rejected": 0.0869140625,
"step": 260
},
{
"epoch": 1.1613683715303447,
"grad_norm": 62.805416107177734,
"learning_rate": 2.2274427632552503e-07,
"logits/chosen": -1.0859375,
"logits/rejected": -1.0703125,
"logps/chosen": -186.0,
"logps/rejected": -158.0,
"loss": 43.834,
"rewards/accuracies": 0.47343748807907104,
"rewards/chosen": 0.111328125,
"rewards/margins": 0.0164794921875,
"rewards/rejected": 0.09521484375,
"step": 270
},
{
"epoch": 1.2043820149203575,
"grad_norm": 68.53240966796875,
"learning_rate": 2.0411707401248403e-07,
"logits/chosen": -1.0625,
"logits/rejected": -1.0703125,
"logps/chosen": -169.0,
"logps/rejected": -154.0,
"loss": 43.7711,
"rewards/accuracies": 0.484375,
"rewards/chosen": 0.11767578125,
"rewards/margins": 0.01953125,
"rewards/rejected": 0.09814453125,
"step": 280
},
{
"epoch": 1.2473956583103702,
"grad_norm": 73.78852844238281,
"learning_rate": 1.8575017118919928e-07,
"logits/chosen": -1.0625,
"logits/rejected": -1.078125,
"logps/chosen": -189.0,
"logps/rejected": -159.0,
"loss": 43.723,
"rewards/accuracies": 0.4937500059604645,
"rewards/chosen": 0.1279296875,
"rewards/margins": 0.020751953125,
"rewards/rejected": 0.107421875,
"step": 290
},
{
"epoch": 1.290409301700383,
"grad_norm": 61.855899810791016,
"learning_rate": 1.6774776555733028e-07,
"logits/chosen": -1.046875,
"logits/rejected": -1.0859375,
"logps/chosen": -175.0,
"logps/rejected": -158.0,
"loss": 43.8781,
"rewards/accuracies": 0.49687498807907104,
"rewards/chosen": 0.126953125,
"rewards/margins": 0.01611328125,
"rewards/rejected": 0.1103515625,
"step": 300
},
{
"epoch": 1.290409301700383,
"eval_logits/chosen": -1.0703125,
"eval_logits/rejected": -1.0859375,
"eval_logps/chosen": -171.0,
"eval_logps/rejected": -161.0,
"eval_loss": 0.6858682036399841,
"eval_rewards/accuracies": 0.46946439146995544,
"eval_rewards/chosen": 0.1328125,
"eval_rewards/margins": 0.0152587890625,
"eval_rewards/rejected": 0.11767578125,
"eval_runtime": 2134.9779,
"eval_samples_per_second": 2.867,
"eval_steps_per_second": 0.717,
"step": 300
},
{
"epoch": 1.3334229450903958,
"grad_norm": 67.3431625366211,
"learning_rate": 1.5021198698108036e-07,
"logits/chosen": -1.0546875,
"logits/rejected": -1.078125,
"logps/chosen": -190.0,
"logps/rejected": -166.0,
"loss": 43.9168,
"rewards/accuracies": 0.4609375,
"rewards/chosen": 0.1396484375,
"rewards/margins": 0.01483154296875,
"rewards/rejected": 0.12451171875,
"step": 310
},
{
"epoch": 1.3764365884804086,
"grad_norm": 72.8765640258789,
"learning_rate": 1.3324231809189983e-07,
"logits/chosen": -1.0625,
"logits/rejected": -1.078125,
"logps/chosen": -190.0,
"logps/rejected": -165.0,
"loss": 43.6699,
"rewards/accuracies": 0.520312488079071,
"rewards/chosen": 0.146484375,
"rewards/margins": 0.023193359375,
"rewards/rejected": 0.123046875,
"step": 320
},
{
"epoch": 1.4194502318704214,
"grad_norm": 66.81339263916016,
"learning_rate": 1.1693502991126608e-07,
"logits/chosen": -1.0625,
"logits/rejected": -1.109375,
"logps/chosen": -181.0,
"logps/rejected": -152.0,
"loss": 43.5453,
"rewards/accuracies": 0.5015624761581421,
"rewards/chosen": 0.1474609375,
"rewards/margins": 0.027099609375,
"rewards/rejected": 0.12060546875,
"step": 330
},
{
"epoch": 1.4624638752604342,
"grad_norm": 61.55015563964844,
"learning_rate": 1.0138263569332267e-07,
"logits/chosen": -1.0546875,
"logits/rejected": -1.0703125,
"logps/chosen": -186.0,
"logps/rejected": -152.0,
"loss": 43.5445,
"rewards/accuracies": 0.5375000238418579,
"rewards/chosen": 0.15234375,
"rewards/margins": 0.0263671875,
"rewards/rejected": 0.1259765625,
"step": 340
},
{
"epoch": 1.505477518650447,
"grad_norm": 66.9729232788086,
"learning_rate": 8.667336608579487e-08,
"logits/chosen": -1.078125,
"logits/rejected": -1.1015625,
"logps/chosen": -175.0,
"logps/rejected": -151.0,
"loss": 43.4664,
"rewards/accuracies": 0.5328124761581421,
"rewards/chosen": 0.1552734375,
"rewards/margins": 0.0291748046875,
"rewards/rejected": 0.1259765625,
"step": 350
},
{
"epoch": 1.5484911620404596,
"grad_norm": 60.46580505371094,
"learning_rate": 7.28906685866599e-08,
"logits/chosen": -1.078125,
"logits/rejected": -1.1015625,
"logps/chosen": -180.0,
"logps/rejected": -160.0,
"loss": 43.6453,
"rewards/accuracies": 0.5218750238418579,
"rewards/chosen": 0.1552734375,
"rewards/margins": 0.022705078125,
"rewards/rejected": 0.1318359375,
"step": 360
},
{
"epoch": 1.5915048054304726,
"grad_norm": 63.649444580078125,
"learning_rate": 6.01127341362138e-08,
"logits/chosen": -1.078125,
"logits/rejected": -1.1015625,
"logps/chosen": -172.0,
"logps/rejected": -151.0,
"loss": 43.7188,
"rewards/accuracies": 0.5062500238418579,
"rewards/chosen": 0.154296875,
"rewards/margins": 0.02197265625,
"rewards/rejected": 0.1328125,
"step": 370
},
{
"epoch": 1.6345184488204851,
"grad_norm": 64.85546875,
"learning_rate": 4.841205353023714e-08,
"logits/chosen": -1.0703125,
"logits/rejected": -1.09375,
"logps/chosen": -173.0,
"logps/rejected": -155.0,
"loss": 43.8621,
"rewards/accuracies": 0.48124998807907104,
"rewards/chosen": 0.154296875,
"rewards/margins": 0.018310546875,
"rewards/rejected": 0.1357421875,
"step": 380
},
{
"epoch": 1.6775320922104981,
"grad_norm": 65.72016143798828,
"learning_rate": 3.785500617078424e-08,
"logits/chosen": -1.0625,
"logits/rejected": -1.078125,
"logps/chosen": -177.0,
"logps/rejected": -150.0,
"loss": 43.6246,
"rewards/accuracies": 0.5140625238418579,
"rewards/chosen": 0.1552734375,
"rewards/margins": 0.0245361328125,
"rewards/rejected": 0.130859375,
"step": 390
},
{
"epoch": 1.7205457356005107,
"grad_norm": 61.353736877441406,
"learning_rate": 2.850148348765921e-08,
"logits/chosen": -1.0703125,
"logits/rejected": -1.1015625,
"logps/chosen": -171.0,
"logps/rejected": -150.0,
"loss": 43.7996,
"rewards/accuracies": 0.49531251192092896,
"rewards/chosen": 0.15625,
"rewards/margins": 0.018310546875,
"rewards/rejected": 0.138671875,
"step": 400
},
{
"epoch": 1.7205457356005107,
"eval_logits/chosen": -1.0703125,
"eval_logits/rejected": -1.0859375,
"eval_logps/chosen": -171.0,
"eval_logps/rejected": -161.0,
"eval_loss": 0.6854622960090637,
"eval_rewards/accuracies": 0.48187458515167236,
"eval_rewards/chosen": 0.1552734375,
"eval_rewards/margins": 0.0166015625,
"eval_rewards/rejected": 0.138671875,
"eval_runtime": 2129.0581,
"eval_samples_per_second": 2.875,
"eval_steps_per_second": 0.719,
"step": 400
},
{
"epoch": 1.7635593789905235,
"grad_norm": 67.52031707763672,
"learning_rate": 2.0404549166959718e-08,
"logits/chosen": -1.078125,
"logits/rejected": -1.1015625,
"logps/chosen": -183.0,
"logps/rejected": -149.0,
"loss": 43.5926,
"rewards/accuracies": 0.5234375,
"rewards/chosen": 0.1611328125,
"rewards/margins": 0.0257568359375,
"rewards/rejected": 0.134765625,
"step": 410
},
{
"epoch": 1.8065730223805363,
"grad_norm": 63.740577697753906,
"learning_rate": 1.3610138114250519e-08,
"logits/chosen": -1.0703125,
"logits/rejected": -1.1015625,
"logps/chosen": -174.0,
"logps/rejected": -155.0,
"loss": 43.6965,
"rewards/accuracies": 0.4859375059604645,
"rewards/chosen": 0.158203125,
"rewards/margins": 0.0225830078125,
"rewards/rejected": 0.1357421875,
"step": 420
},
{
"epoch": 1.849586665770549,
"grad_norm": 65.15459442138672,
"learning_rate": 8.156795860187027e-09,
"logits/chosen": -1.078125,
"logits/rejected": -1.1171875,
"logps/chosen": -184.0,
"logps/rejected": -157.0,
"loss": 43.6516,
"rewards/accuracies": 0.4937500059604645,
"rewards/chosen": 0.1591796875,
"rewards/margins": 0.0234375,
"rewards/rejected": 0.1357421875,
"step": 430
},
{
"epoch": 1.8926003091605619,
"grad_norm": 67.16203308105469,
"learning_rate": 4.075459886973082e-09,
"logits/chosen": -1.0546875,
"logits/rejected": -1.09375,
"logps/chosen": -179.0,
"logps/rejected": -145.0,
"loss": 43.5895,
"rewards/accuracies": 0.5171874761581421,
"rewards/chosen": 0.1611328125,
"rewards/margins": 0.0262451171875,
"rewards/rejected": 0.134765625,
"step": 440
},
{
"epoch": 1.9356139525505747,
"grad_norm": 64.06181335449219,
"learning_rate": 1.3892841162143899e-09,
"logits/chosen": -1.078125,
"logits/rejected": -1.1015625,
"logps/chosen": -183.0,
"logps/rejected": -149.0,
"loss": 43.5109,
"rewards/accuracies": 0.526562511920929,
"rewards/chosen": 0.162109375,
"rewards/margins": 0.0274658203125,
"rewards/rejected": 0.134765625,
"step": 450
},
{
"epoch": 1.9786275959405875,
"grad_norm": 71.79183959960938,
"learning_rate": 1.1350755386951849e-10,
"logits/chosen": -1.0859375,
"logits/rejected": -1.1171875,
"logps/chosen": -182.0,
"logps/rejected": -155.0,
"loss": 43.7199,
"rewards/accuracies": 0.5328124761581421,
"rewards/chosen": 0.1591796875,
"rewards/margins": 0.0223388671875,
"rewards/rejected": 0.13671875,
"step": 460
},
{
"epoch": 1.9958330532965927,
"step": 464,
"total_flos": 0.0,
"train_loss": 43.948444234913794,
"train_runtime": 23482.907,
"train_samples_per_second": 1.267,
"train_steps_per_second": 0.02
}
],
"logging_steps": 10,
"max_steps": 464,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
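
A minimal sketch of how one might consume the state above, assuming it is saved locally as trainer_state.json (the path and the summary format are illustrative assumptions; the field names such as log_history, rewards/margins, and eval_loss are taken directly from the JSON itself). It separates the step-wise training entries from the periodic eval entries and prints how the DPO-style reward margin and eval loss evolve.

import json

# Assumption: the JSON above has been saved to this path.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training log entries carry "rewards/margins"; eval entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "rewards/margins" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"train points logged: {len(train_logs)}, eval points: {len(eval_logs)}")
for e in eval_logs:
    print(
        f"step {e['step']:>3}: eval_loss={e['eval_loss']:.4f}, "
        f"margin={e['eval_rewards/margins']:.5f}, "
        f"accuracy={e['eval_rewards/accuracies']:.3f}"
    )

# The final entry holds the aggregate training summary.
final = state["log_history"][-1]
print(f"final train_loss={final['train_loss']:.3f} over {final['train_runtime']:.0f}s")

Run against this file, the loop prints one line per eval step (100, 200, 300, 400), which is a quick way to confirm the slow downward drift in eval_loss and the growing reward margin visible in the raw log above.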