{
"best_metric": 0.9624110460281372,
"best_model_checkpoint": "miner_id_24/checkpoint-100",
"epoch": 3.0049751243781095,
"eval_steps": 50,
"global_step": 151,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01990049751243781,
"grad_norm": 5.543930530548096,
"learning_rate": 1e-05,
"loss": 5.5751,
"step": 1
},
{
"epoch": 0.01990049751243781,
"eval_loss": 9.599178314208984,
"eval_runtime": 6.162,
"eval_samples_per_second": 13.794,
"eval_steps_per_second": 3.57,
"step": 1
},
{
"epoch": 0.03980099502487562,
"grad_norm": 5.9391279220581055,
"learning_rate": 2e-05,
"loss": 6.2605,
"step": 2
},
{
"epoch": 0.05970149253731343,
"grad_norm": 10.055806159973145,
"learning_rate": 3e-05,
"loss": 6.6465,
"step": 3
},
{
"epoch": 0.07960199004975124,
"grad_norm": 24.726375579833984,
"learning_rate": 4e-05,
"loss": 9.6417,
"step": 4
},
{
"epoch": 0.09950248756218906,
"grad_norm": 21.94843292236328,
"learning_rate": 5e-05,
"loss": 9.1886,
"step": 5
},
{
"epoch": 0.11940298507462686,
"grad_norm": 17.2749080657959,
"learning_rate": 6e-05,
"loss": 7.9442,
"step": 6
},
{
"epoch": 0.13930348258706468,
"grad_norm": 20.374170303344727,
"learning_rate": 7e-05,
"loss": 5.9546,
"step": 7
},
{
"epoch": 0.15920398009950248,
"grad_norm": 14.176255226135254,
"learning_rate": 8e-05,
"loss": 3.7086,
"step": 8
},
{
"epoch": 0.1791044776119403,
"grad_norm": 8.863327026367188,
"learning_rate": 9e-05,
"loss": 1.8624,
"step": 9
},
{
"epoch": 0.19900497512437812,
"grad_norm": 7.542079925537109,
"learning_rate": 0.0001,
"loss": 5.9968,
"step": 10
},
{
"epoch": 0.21890547263681592,
"grad_norm": 15.19139289855957,
"learning_rate": 9.998758966336295e-05,
"loss": 8.1119,
"step": 11
},
{
"epoch": 0.23880597014925373,
"grad_norm": 18.45025634765625,
"learning_rate": 9.995036481411004e-05,
"loss": 8.9081,
"step": 12
},
{
"epoch": 0.25870646766169153,
"grad_norm": 12.949933052062988,
"learning_rate": 9.988834393115767e-05,
"loss": 4.0368,
"step": 13
},
{
"epoch": 0.27860696517412936,
"grad_norm": 12.047260284423828,
"learning_rate": 9.980155780250727e-05,
"loss": 4.0461,
"step": 14
},
{
"epoch": 0.29850746268656714,
"grad_norm": 10.137738227844238,
"learning_rate": 9.969004950996175e-05,
"loss": 3.5263,
"step": 15
},
{
"epoch": 0.31840796019900497,
"grad_norm": 7.845066547393799,
"learning_rate": 9.9553874407739e-05,
"loss": 1.8586,
"step": 16
},
{
"epoch": 0.3383084577114428,
"grad_norm": 1.7427806854248047,
"learning_rate": 9.939310009499348e-05,
"loss": 0.4681,
"step": 17
},
{
"epoch": 0.3582089552238806,
"grad_norm": 1.3990861177444458,
"learning_rate": 9.92078063822589e-05,
"loss": 0.3307,
"step": 18
},
{
"epoch": 0.3781094527363184,
"grad_norm": 0.9510717988014221,
"learning_rate": 9.899808525182935e-05,
"loss": 0.148,
"step": 19
},
{
"epoch": 0.39800995024875624,
"grad_norm": 0.7023572325706482,
"learning_rate": 9.876404081209796e-05,
"loss": 0.0863,
"step": 20
},
{
"epoch": 0.417910447761194,
"grad_norm": 2.599980354309082,
"learning_rate": 9.850578924587614e-05,
"loss": 0.8357,
"step": 21
},
{
"epoch": 0.43781094527363185,
"grad_norm": 8.273324966430664,
"learning_rate": 9.822345875271883e-05,
"loss": 3.8429,
"step": 22
},
{
"epoch": 0.4577114427860697,
"grad_norm": 10.221076965332031,
"learning_rate": 9.791718948528458e-05,
"loss": 4.1774,
"step": 23
},
{
"epoch": 0.47761194029850745,
"grad_norm": 10.803452491760254,
"learning_rate": 9.758713347976179e-05,
"loss": 4.457,
"step": 24
},
{
"epoch": 0.4975124378109453,
"grad_norm": 9.160074234008789,
"learning_rate": 9.723345458039594e-05,
"loss": 2.9867,
"step": 25
},
{
"epoch": 0.5174129353233831,
"grad_norm": 4.31099796295166,
"learning_rate": 9.685632835815518e-05,
"loss": 2.5178,
"step": 26
},
{
"epoch": 0.5373134328358209,
"grad_norm": 3.5618479251861572,
"learning_rate": 9.645594202357439e-05,
"loss": 2.5185,
"step": 27
},
{
"epoch": 0.5572139303482587,
"grad_norm": 2.6641221046447754,
"learning_rate": 9.603249433382144e-05,
"loss": 1.2579,
"step": 28
},
{
"epoch": 0.5771144278606966,
"grad_norm": 1.0730020999908447,
"learning_rate": 9.558619549403147e-05,
"loss": 0.3654,
"step": 29
},
{
"epoch": 0.5970149253731343,
"grad_norm": 0.2987671196460724,
"learning_rate": 9.511726705295817e-05,
"loss": 0.0464,
"step": 30
},
{
"epoch": 0.6169154228855721,
"grad_norm": 0.8347283005714417,
"learning_rate": 9.462594179299406e-05,
"loss": 0.2107,
"step": 31
},
{
"epoch": 0.6368159203980099,
"grad_norm": 0.8029500246047974,
"learning_rate": 9.41124636146141e-05,
"loss": 0.179,
"step": 32
},
{
"epoch": 0.6567164179104478,
"grad_norm": 1.6079643964767456,
"learning_rate": 9.357708741530025e-05,
"loss": 0.5489,
"step": 33
},
{
"epoch": 0.6766169154228856,
"grad_norm": 7.646543502807617,
"learning_rate": 9.302007896300698e-05,
"loss": 3.2044,
"step": 34
},
{
"epoch": 0.6965174129353234,
"grad_norm": 8.641079902648926,
"learning_rate": 9.244171476423037e-05,
"loss": 3.0777,
"step": 35
},
{
"epoch": 0.7164179104477612,
"grad_norm": 11.796111106872559,
"learning_rate": 9.184228192674667e-05,
"loss": 4.0429,
"step": 36
},
{
"epoch": 0.736318407960199,
"grad_norm": 5.918362617492676,
"learning_rate": 9.122207801708802e-05,
"loss": 2.5737,
"step": 37
},
{
"epoch": 0.7562189054726368,
"grad_norm": 6.796441078186035,
"learning_rate": 9.058141091282656e-05,
"loss": 2.2759,
"step": 38
},
{
"epoch": 0.7761194029850746,
"grad_norm": 5.166509628295898,
"learning_rate": 8.992059864973972e-05,
"loss": 2.3741,
"step": 39
},
{
"epoch": 0.7960199004975125,
"grad_norm": 9.372264862060547,
"learning_rate": 8.923996926393305e-05,
"loss": 0.6126,
"step": 40
},
{
"epoch": 0.8159203980099502,
"grad_norm": 0.7963209748268127,
"learning_rate": 8.853986062899868e-05,
"loss": 0.2505,
"step": 41
},
{
"epoch": 0.835820895522388,
"grad_norm": 0.8974098563194275,
"learning_rate": 8.782062028829028e-05,
"loss": 0.3499,
"step": 42
},
{
"epoch": 0.8557213930348259,
"grad_norm": 0.8836018443107605,
"learning_rate": 8.708260528239788e-05,
"loss": 0.336,
"step": 43
},
{
"epoch": 0.8756218905472637,
"grad_norm": 0.41947388648986816,
"learning_rate": 8.632618197190816e-05,
"loss": 0.0833,
"step": 44
},
{
"epoch": 0.8955223880597015,
"grad_norm": 0.6316033601760864,
"learning_rate": 8.555172585553805e-05,
"loss": 0.1746,
"step": 45
},
{
"epoch": 0.9154228855721394,
"grad_norm": 3.0872931480407715,
"learning_rate": 8.475962138373213e-05,
"loss": 1.8755,
"step": 46
},
{
"epoch": 0.9353233830845771,
"grad_norm": 5.99851655960083,
"learning_rate": 8.395026176781627e-05,
"loss": 3.2634,
"step": 47
},
{
"epoch": 0.9552238805970149,
"grad_norm": 8.195245742797852,
"learning_rate": 8.312404878480222e-05,
"loss": 3.2091,
"step": 48
},
{
"epoch": 0.9751243781094527,
"grad_norm": 2.1140687465667725,
"learning_rate": 8.228139257794012e-05,
"loss": 1.2906,
"step": 49
},
{
"epoch": 0.9950248756218906,
"grad_norm": 2.3306195735931396,
"learning_rate": 8.142271145311783e-05,
"loss": 1.0865,
"step": 50
},
{
"epoch": 0.9950248756218906,
"eval_loss": 1.0756102800369263,
"eval_runtime": 6.3397,
"eval_samples_per_second": 13.408,
"eval_steps_per_second": 3.47,
"step": 50
},
{
"epoch": 1.0149253731343284,
"grad_norm": 9.008943557739258,
"learning_rate": 8.054843167120827e-05,
"loss": 3.9621,
"step": 51
},
{
"epoch": 1.0348258706467661,
"grad_norm": 2.001354455947876,
"learning_rate": 7.965898723646776e-05,
"loss": 1.5088,
"step": 52
},
{
"epoch": 1.054726368159204,
"grad_norm": 2.112483024597168,
"learning_rate": 7.875481968109052e-05,
"loss": 1.5239,
"step": 53
},
{
"epoch": 1.0746268656716418,
"grad_norm": 1.9544909000396729,
"learning_rate": 7.783637784602609e-05,
"loss": 0.975,
"step": 54
},
{
"epoch": 1.0945273631840795,
"grad_norm": 0.5338630676269531,
"learning_rate": 7.690411765816864e-05,
"loss": 0.1268,
"step": 55
},
{
"epoch": 1.1144278606965174,
"grad_norm": 0.5618958473205566,
"learning_rate": 7.595850190402876e-05,
"loss": 0.1182,
"step": 56
},
{
"epoch": 1.1343283582089552,
"grad_norm": 0.6160061955451965,
"learning_rate": 7.500000000000001e-05,
"loss": 0.144,
"step": 57
},
{
"epoch": 1.154228855721393,
"grad_norm": 0.5106895565986633,
"learning_rate": 7.402908775933419e-05,
"loss": 0.0888,
"step": 58
},
{
"epoch": 1.1741293532338308,
"grad_norm": 0.39127403497695923,
"learning_rate": 7.304624715594139e-05,
"loss": 0.0848,
"step": 59
},
{
"epoch": 1.1940298507462686,
"grad_norm": 3.96272611618042,
"learning_rate": 7.205196608513159e-05,
"loss": 1.3122,
"step": 60
},
{
"epoch": 1.2139303482587065,
"grad_norm": 4.137331008911133,
"learning_rate": 7.104673812141675e-05,
"loss": 2.0647,
"step": 61
},
{
"epoch": 1.2338308457711442,
"grad_norm": 9.125043869018555,
"learning_rate": 7.003106227349399e-05,
"loss": 2.4258,
"step": 62
},
{
"epoch": 1.2537313432835822,
"grad_norm": 4.514773845672607,
"learning_rate": 6.900544273653074e-05,
"loss": 2.0476,
"step": 63
},
{
"epoch": 1.2736318407960199,
"grad_norm": 2.9408376216888428,
"learning_rate": 6.797038864187564e-05,
"loss": 1.5427,
"step": 64
},
{
"epoch": 1.2935323383084576,
"grad_norm": 2.8879308700561523,
"learning_rate": 6.692641380431879e-05,
"loss": 1.7285,
"step": 65
},
{
"epoch": 1.3134328358208955,
"grad_norm": 1.3168548345565796,
"learning_rate": 6.587403646702714e-05,
"loss": 0.5157,
"step": 66
},
{
"epoch": 1.3333333333333333,
"grad_norm": 0.7307369112968445,
"learning_rate": 6.481377904428171e-05,
"loss": 0.138,
"step": 67
},
{
"epoch": 1.3532338308457712,
"grad_norm": 0.3926345705986023,
"learning_rate": 6.374616786214402e-05,
"loss": 0.0664,
"step": 68
},
{
"epoch": 1.373134328358209,
"grad_norm": 0.3398008346557617,
"learning_rate": 6.26717328971808e-05,
"loss": 0.0546,
"step": 69
},
{
"epoch": 1.3930348258706466,
"grad_norm": 0.7794240713119507,
"learning_rate": 6.159100751337642e-05,
"loss": 0.1423,
"step": 70
},
{
"epoch": 1.4129353233830846,
"grad_norm": 0.28816598653793335,
"learning_rate": 6.0504528197363894e-05,
"loss": 0.0374,
"step": 71
},
{
"epoch": 1.4328358208955223,
"grad_norm": 2.6052889823913574,
"learning_rate": 5.9412834292105676e-05,
"loss": 1.3742,
"step": 72
},
{
"epoch": 1.4527363184079602,
"grad_norm": 5.297976970672607,
"learning_rate": 5.831646772915651e-05,
"loss": 2.3129,
"step": 73
},
{
"epoch": 1.472636815920398,
"grad_norm": 6.222244739532471,
"learning_rate": 5.721597275964133e-05,
"loss": 2.4451,
"step": 74
},
{
"epoch": 1.4925373134328357,
"grad_norm": 3.1612331867218018,
"learning_rate": 5.6111895684081725e-05,
"loss": 1.5285,
"step": 75
},
{
"epoch": 1.5124378109452736,
"grad_norm": 2.373920440673828,
"learning_rate": 5.5004784581204927e-05,
"loss": 1.4736,
"step": 76
},
{
"epoch": 1.5323383084577116,
"grad_norm": 2.493252992630005,
"learning_rate": 5.389518903587017e-05,
"loss": 1.5259,
"step": 77
},
{
"epoch": 1.5522388059701493,
"grad_norm": 2.429081678390503,
"learning_rate": 5.2783659866247424e-05,
"loss": 1.6323,
"step": 78
},
{
"epoch": 1.572139303482587,
"grad_norm": 0.9949527382850647,
"learning_rate": 5.167074885038373e-05,
"loss": 0.3188,
"step": 79
},
{
"epoch": 1.5920398009950247,
"grad_norm": 0.5965785980224609,
"learning_rate": 5.055700845229327e-05,
"loss": 0.1503,
"step": 80
},
{
"epoch": 1.6119402985074627,
"grad_norm": 0.5434532165527344,
"learning_rate": 4.944299154770673e-05,
"loss": 0.094,
"step": 81
},
{
"epoch": 1.6318407960199006,
"grad_norm": 0.7457742691040039,
"learning_rate": 4.832925114961629e-05,
"loss": 0.164,
"step": 82
},
{
"epoch": 1.6517412935323383,
"grad_norm": 0.3958088457584381,
"learning_rate": 4.72163401337526e-05,
"loss": 0.0606,
"step": 83
},
{
"epoch": 1.671641791044776,
"grad_norm": 1.8904391527175903,
"learning_rate": 4.610481096412984e-05,
"loss": 0.9604,
"step": 84
},
{
"epoch": 1.6915422885572138,
"grad_norm": 4.544404029846191,
"learning_rate": 4.4995215418795085e-05,
"loss": 2.215,
"step": 85
},
{
"epoch": 1.7114427860696517,
"grad_norm": 5.509747505187988,
"learning_rate": 4.388810431591829e-05,
"loss": 2.0638,
"step": 86
},
{
"epoch": 1.7313432835820897,
"grad_norm": 2.859410047531128,
"learning_rate": 4.278402724035867e-05,
"loss": 1.693,
"step": 87
},
{
"epoch": 1.7512437810945274,
"grad_norm": 2.292931079864502,
"learning_rate": 4.1683532270843504e-05,
"loss": 1.3341,
"step": 88
},
{
"epoch": 1.771144278606965,
"grad_norm": 2.5066726207733154,
"learning_rate": 4.058716570789433e-05,
"loss": 1.4565,
"step": 89
},
{
"epoch": 1.7910447761194028,
"grad_norm": 1.7927272319793701,
"learning_rate": 3.94954718026361e-05,
"loss": 0.892,
"step": 90
},
{
"epoch": 1.8109452736318408,
"grad_norm": 0.5884804725646973,
"learning_rate": 3.840899248662358e-05,
"loss": 0.1595,
"step": 91
},
{
"epoch": 1.8308457711442787,
"grad_norm": 1.1309645175933838,
"learning_rate": 3.7328267102819225e-05,
"loss": 0.2703,
"step": 92
},
{
"epoch": 1.8507462686567164,
"grad_norm": 0.31311914324760437,
"learning_rate": 3.6253832137856e-05,
"loss": 0.0479,
"step": 93
},
{
"epoch": 1.8706467661691542,
"grad_norm": 0.5945075154304504,
"learning_rate": 3.5186220955718306e-05,
"loss": 0.1501,
"step": 94
},
{
"epoch": 1.890547263681592,
"grad_norm": 1.0878932476043701,
"learning_rate": 3.4125963532972873e-05,
"loss": 0.4793,
"step": 95
},
{
"epoch": 1.9104477611940298,
"grad_norm": 3.067518472671509,
"learning_rate": 3.307358619568123e-05,
"loss": 1.7691,
"step": 96
},
{
"epoch": 1.9303482587064678,
"grad_norm": 3.476691722869873,
"learning_rate": 3.202961135812437e-05,
"loss": 1.928,
"step": 97
},
{
"epoch": 1.9502487562189055,
"grad_norm": 4.594018459320068,
"learning_rate": 3.0994557263469265e-05,
"loss": 2.3617,
"step": 98
},
{
"epoch": 1.9701492537313432,
"grad_norm": 2.5807111263275146,
"learning_rate": 2.996893772650602e-05,
"loss": 1.4338,
"step": 99
},
{
"epoch": 1.9900497512437811,
"grad_norm": 0.2568559944629669,
"learning_rate": 2.895326187858326e-05,
"loss": 0.0345,
"step": 100
},
{
"epoch": 1.9900497512437811,
"eval_loss": 0.9624110460281372,
"eval_runtime": 6.3475,
"eval_samples_per_second": 13.391,
"eval_steps_per_second": 3.466,
"step": 100
},
{
"epoch": 2.009950248756219,
"grad_norm": 15.313194274902344,
"learning_rate": 2.7948033914868415e-05,
"loss": 3.6961,
"step": 101
},
{
"epoch": 2.029850746268657,
"grad_norm": 1.6831941604614258,
"learning_rate": 2.69537528440586e-05,
"loss": 0.9623,
"step": 102
},
{
"epoch": 2.0497512437810945,
"grad_norm": 1.7447772026062012,
"learning_rate": 2.5970912240665813e-05,
"loss": 1.0607,
"step": 103
},
{
"epoch": 2.0696517412935322,
"grad_norm": 1.8062071800231934,
"learning_rate": 2.500000000000001e-05,
"loss": 1.0382,
"step": 104
},
{
"epoch": 2.08955223880597,
"grad_norm": 0.533903181552887,
"learning_rate": 2.4041498095971253e-05,
"loss": 0.0953,
"step": 105
},
{
"epoch": 2.109452736318408,
"grad_norm": 0.41501080989837646,
"learning_rate": 2.3095882341831372e-05,
"loss": 0.0889,
"step": 106
},
{
"epoch": 2.129353233830846,
"grad_norm": 0.605328381061554,
"learning_rate": 2.216362215397393e-05,
"loss": 0.1302,
"step": 107
},
{
"epoch": 2.1492537313432836,
"grad_norm": 0.3093937039375305,
"learning_rate": 2.124518031890948e-05,
"loss": 0.0564,
"step": 108
},
{
"epoch": 2.1691542288557213,
"grad_norm": 0.321236252784729,
"learning_rate": 2.0341012763532243e-05,
"loss": 0.0479,
"step": 109
},
{
"epoch": 2.189054726368159,
"grad_norm": 1.6769386529922485,
"learning_rate": 1.945156832879174e-05,
"loss": 0.6905,
"step": 110
},
{
"epoch": 2.208955223880597,
"grad_norm": 3.2930898666381836,
"learning_rate": 1.8577288546882167e-05,
"loss": 1.2683,
"step": 111
},
{
"epoch": 2.228855721393035,
"grad_norm": 3.6497483253479004,
"learning_rate": 1.771860742205988e-05,
"loss": 1.4517,
"step": 112
},
{
"epoch": 2.2487562189054726,
"grad_norm": 2.7759523391723633,
"learning_rate": 1.687595121519778e-05,
"loss": 1.2682,
"step": 113
},
{
"epoch": 2.2686567164179103,
"grad_norm": 1.7020666599273682,
"learning_rate": 1.604973823218376e-05,
"loss": 0.8624,
"step": 114
},
{
"epoch": 2.288557213930348,
"grad_norm": 2.058931589126587,
"learning_rate": 1.5240378616267886e-05,
"loss": 0.9802,
"step": 115
},
{
"epoch": 2.308457711442786,
"grad_norm": 1.6504006385803223,
"learning_rate": 1.4448274144461965e-05,
"loss": 0.626,
"step": 116
},
{
"epoch": 2.328358208955224,
"grad_norm": 0.36916500329971313,
"learning_rate": 1.367381802809185e-05,
"loss": 0.0499,
"step": 117
},
{
"epoch": 2.3482587064676617,
"grad_norm": 0.4427722692489624,
"learning_rate": 1.2917394717602121e-05,
"loss": 0.0567,
"step": 118
},
{
"epoch": 2.3681592039800994,
"grad_norm": 0.24329277873039246,
"learning_rate": 1.2179379711709737e-05,
"loss": 0.0243,
"step": 119
},
{
"epoch": 2.388059701492537,
"grad_norm": 0.7087410688400269,
"learning_rate": 1.1460139371001338e-05,
"loss": 0.155,
"step": 120
},
{
"epoch": 2.4079601990049753,
"grad_norm": 0.24490374326705933,
"learning_rate": 1.0760030736066951e-05,
"loss": 0.011,
"step": 121
},
{
"epoch": 2.427860696517413,
"grad_norm": 2.3325791358947754,
"learning_rate": 1.0079401350260287e-05,
"loss": 0.6917,
"step": 122
},
{
"epoch": 2.4477611940298507,
"grad_norm": 3.353835344314575,
"learning_rate": 9.41858908717344e-06,
"loss": 1.2289,
"step": 123
},
{
"epoch": 2.4676616915422884,
"grad_norm": 4.807076930999756,
"learning_rate": 8.777921982911996e-06,
"loss": 1.6531,
"step": 124
},
{
"epoch": 2.487562189054726,
"grad_norm": 3.2822203636169434,
"learning_rate": 8.157718073253351e-06,
"loss": 1.3298,
"step": 125
},
{
"epoch": 2.5074626865671643,
"grad_norm": 2.1589744091033936,
"learning_rate": 7.558285235769646e-06,
"loss": 0.8649,
"step": 126
},
{
"epoch": 2.527363184079602,
"grad_norm": 2.269338607788086,
"learning_rate": 6.979921036993042e-06,
"loss": 1.0175,
"step": 127
},
{
"epoch": 2.5472636815920398,
"grad_norm": 1.7215580940246582,
"learning_rate": 6.422912584699753e-06,
"loss": 0.4796,
"step": 128
},
{
"epoch": 2.5671641791044775,
"grad_norm": 0.5773996710777283,
"learning_rate": 5.887536385385917e-06,
"loss": 0.0872,
"step": 129
},
{
"epoch": 2.587064676616915,
"grad_norm": 0.3628270924091339,
"learning_rate": 5.374058207005944e-06,
"loss": 0.0363,
"step": 130
},
{
"epoch": 2.6069651741293534,
"grad_norm": 0.5935407876968384,
"learning_rate": 4.882732947041818e-06,
"loss": 0.1109,
"step": 131
},
{
"epoch": 2.626865671641791,
"grad_norm": 0.3369639217853546,
"learning_rate": 4.413804505968533e-06,
"loss": 0.0402,
"step": 132
},
{
"epoch": 2.646766169154229,
"grad_norm": 0.4762958288192749,
"learning_rate": 3.967505666178556e-06,
"loss": 0.0557,
"step": 133
},
{
"epoch": 2.6666666666666665,
"grad_norm": 2.259021759033203,
"learning_rate": 3.544057976425619e-06,
"loss": 0.8966,
"step": 134
},
{
"epoch": 2.6865671641791042,
"grad_norm": 3.387164831161499,
"learning_rate": 3.1436716418448307e-06,
"loss": 1.3748,
"step": 135
},
{
"epoch": 2.7064676616915424,
"grad_norm": 4.192432880401611,
"learning_rate": 2.7665454196040664e-06,
"loss": 1.4131,
"step": 136
},
{
"epoch": 2.72636815920398,
"grad_norm": 2.7902915477752686,
"learning_rate": 2.4128665202382326e-06,
"loss": 1.1285,
"step": 137
},
{
"epoch": 2.746268656716418,
"grad_norm": 2.0142769813537598,
"learning_rate": 2.0828105147154273e-06,
"loss": 0.7524,
"step": 138
},
{
"epoch": 2.7661691542288556,
"grad_norm": 2.4600789546966553,
"learning_rate": 1.7765412472811771e-06,
"loss": 1.0153,
"step": 139
},
{
"epoch": 2.7860696517412933,
"grad_norm": 2.070169448852539,
"learning_rate": 1.4942107541238704e-06,
"loss": 0.8548,
"step": 140
},
{
"epoch": 2.8059701492537314,
"grad_norm": 0.9298514127731323,
"learning_rate": 1.2359591879020526e-06,
"loss": 0.2304,
"step": 141
},
{
"epoch": 2.825870646766169,
"grad_norm": 0.287707656621933,
"learning_rate": 1.0019147481706625e-06,
"loss": 0.0342,
"step": 142
},
{
"epoch": 2.845771144278607,
"grad_norm": 0.24002987146377563,
"learning_rate": 7.921936177411049e-07,
"loss": 0.0208,
"step": 143
},
{
"epoch": 2.8656716417910446,
"grad_norm": 0.5002749562263489,
"learning_rate": 6.06899905006525e-07,
"loss": 0.0661,
"step": 144
},
{
"epoch": 2.8855721393034823,
"grad_norm": 0.4782179594039917,
"learning_rate": 4.461255922609986e-07,
"loss": 0.088,
"step": 145
},
{
"epoch": 2.9054726368159205,
"grad_norm": 1.0259336233139038,
"learning_rate": 3.0995049003826325e-07,
"loss": 0.2354,
"step": 146
},
{
"epoch": 2.925373134328358,
"grad_norm": 4.005832195281982,
"learning_rate": 1.984421974927375e-07,
"loss": 1.3838,
"step": 147
},
{
"epoch": 2.945273631840796,
"grad_norm": 3.5791056156158447,
"learning_rate": 1.1165606884234181e-07,
"loss": 1.5911,
"step": 148
},
{
"epoch": 2.965174129353234,
"grad_norm": 3.579153299331665,
"learning_rate": 4.963518588996796e-08,
"loss": 1.3113,
"step": 149
},
{
"epoch": 2.9850746268656714,
"grad_norm": 0.6860787868499756,
"learning_rate": 1.2410336637047605e-08,
"loss": 0.1386,
"step": 150
},
{
"epoch": 2.9850746268656714,
"eval_loss": 1.0078964233398438,
"eval_runtime": 6.3591,
"eval_samples_per_second": 13.367,
"eval_steps_per_second": 3.46,
"step": 150
},
{
"epoch": 3.0049751243781095,
"grad_norm": 7.453361511230469,
"learning_rate": 0.0,
"loss": 2.4948,
"step": 151
}
],
"logging_steps": 1,
"max_steps": 151,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 1
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.2778555039062426e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}