{
"best_metric": 1.01239013671875,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.3280032800328003,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0016400164001640015,
"grad_norm": 5.946897029876709,
"learning_rate": 1e-05,
"loss": 3.662,
"step": 1
},
{
"epoch": 0.0016400164001640015,
"eval_loss": 1.2280761003494263,
"eval_runtime": 238.766,
"eval_samples_per_second": 4.301,
"eval_steps_per_second": 1.076,
"step": 1
},
{
"epoch": 0.003280032800328003,
"grad_norm": 7.052305221557617,
"learning_rate": 2e-05,
"loss": 4.2734,
"step": 2
},
{
"epoch": 0.004920049200492005,
"grad_norm": 5.927187442779541,
"learning_rate": 3e-05,
"loss": 4.0489,
"step": 3
},
{
"epoch": 0.006560065600656006,
"grad_norm": 4.53819465637207,
"learning_rate": 4e-05,
"loss": 4.6816,
"step": 4
},
{
"epoch": 0.008200082000820008,
"grad_norm": 6.975519180297852,
"learning_rate": 5e-05,
"loss": 4.039,
"step": 5
},
{
"epoch": 0.00984009840098401,
"grad_norm": 6.51109504699707,
"learning_rate": 6e-05,
"loss": 4.4479,
"step": 6
},
{
"epoch": 0.011480114801148012,
"grad_norm": 5.128538608551025,
"learning_rate": 7e-05,
"loss": 4.2166,
"step": 7
},
{
"epoch": 0.013120131201312012,
"grad_norm": 4.6934051513671875,
"learning_rate": 8e-05,
"loss": 4.1606,
"step": 8
},
{
"epoch": 0.014760147601476014,
"grad_norm": 4.481375217437744,
"learning_rate": 9e-05,
"loss": 4.3906,
"step": 9
},
{
"epoch": 0.016400164001640016,
"grad_norm": 3.522401809692383,
"learning_rate": 0.0001,
"loss": 4.1004,
"step": 10
},
{
"epoch": 0.018040180401804017,
"grad_norm": 3.372955083847046,
"learning_rate": 9.999316524962345e-05,
"loss": 4.284,
"step": 11
},
{
"epoch": 0.01968019680196802,
"grad_norm": 3.9537601470947266,
"learning_rate": 9.997266286704631e-05,
"loss": 4.4481,
"step": 12
},
{
"epoch": 0.02132021320213202,
"grad_norm": 3.660971164703369,
"learning_rate": 9.993849845741524e-05,
"loss": 4.0816,
"step": 13
},
{
"epoch": 0.022960229602296024,
"grad_norm": 3.060683250427246,
"learning_rate": 9.989068136093873e-05,
"loss": 4.1147,
"step": 14
},
{
"epoch": 0.024600246002460024,
"grad_norm": 3.1304819583892822,
"learning_rate": 9.98292246503335e-05,
"loss": 4.5893,
"step": 15
},
{
"epoch": 0.026240262402624025,
"grad_norm": 3.06088924407959,
"learning_rate": 9.975414512725057e-05,
"loss": 4.3836,
"step": 16
},
{
"epoch": 0.02788027880278803,
"grad_norm": 3.154261589050293,
"learning_rate": 9.966546331768191e-05,
"loss": 4.4681,
"step": 17
},
{
"epoch": 0.02952029520295203,
"grad_norm": 2.9176149368286133,
"learning_rate": 9.956320346634876e-05,
"loss": 4.5006,
"step": 18
},
{
"epoch": 0.031160311603116032,
"grad_norm": 3.8102164268493652,
"learning_rate": 9.944739353007344e-05,
"loss": 4.079,
"step": 19
},
{
"epoch": 0.03280032800328003,
"grad_norm": 3.008178949356079,
"learning_rate": 9.931806517013612e-05,
"loss": 4.2798,
"step": 20
},
{
"epoch": 0.03444034440344403,
"grad_norm": 3.0751898288726807,
"learning_rate": 9.917525374361912e-05,
"loss": 4.5007,
"step": 21
},
{
"epoch": 0.03608036080360803,
"grad_norm": 2.8388233184814453,
"learning_rate": 9.901899829374047e-05,
"loss": 4.3033,
"step": 22
},
{
"epoch": 0.03772037720377204,
"grad_norm": 3.0876259803771973,
"learning_rate": 9.884934153917997e-05,
"loss": 4.3808,
"step": 23
},
{
"epoch": 0.03936039360393604,
"grad_norm": 2.7423012256622314,
"learning_rate": 9.86663298624003e-05,
"loss": 4.3329,
"step": 24
},
{
"epoch": 0.04100041000410004,
"grad_norm": 2.9184083938598633,
"learning_rate": 9.847001329696653e-05,
"loss": 4.453,
"step": 25
},
{
"epoch": 0.04264042640426404,
"grad_norm": 2.6982572078704834,
"learning_rate": 9.826044551386744e-05,
"loss": 4.3764,
"step": 26
},
{
"epoch": 0.04428044280442804,
"grad_norm": 2.7638351917266846,
"learning_rate": 9.803768380684242e-05,
"loss": 4.6981,
"step": 27
},
{
"epoch": 0.04592045920459205,
"grad_norm": 2.825589418411255,
"learning_rate": 9.780178907671789e-05,
"loss": 4.3602,
"step": 28
},
{
"epoch": 0.04756047560475605,
"grad_norm": 2.722837209701538,
"learning_rate": 9.755282581475769e-05,
"loss": 4.1382,
"step": 29
},
{
"epoch": 0.04920049200492005,
"grad_norm": 2.671046495437622,
"learning_rate": 9.729086208503174e-05,
"loss": 4.6047,
"step": 30
},
{
"epoch": 0.05084050840508405,
"grad_norm": 2.576572895050049,
"learning_rate": 9.701596950580806e-05,
"loss": 4.086,
"step": 31
},
{
"epoch": 0.05248052480524805,
"grad_norm": 2.5752434730529785,
"learning_rate": 9.672822322997305e-05,
"loss": 4.6261,
"step": 32
},
{
"epoch": 0.05412054120541206,
"grad_norm": 2.5486037731170654,
"learning_rate": 9.642770192448536e-05,
"loss": 4.2545,
"step": 33
},
{
"epoch": 0.05576055760557606,
"grad_norm": 2.501359224319458,
"learning_rate": 9.611448774886924e-05,
"loss": 4.4951,
"step": 34
},
{
"epoch": 0.05740057400574006,
"grad_norm": 2.560471534729004,
"learning_rate": 9.578866633275288e-05,
"loss": 4.3553,
"step": 35
},
{
"epoch": 0.05904059040590406,
"grad_norm": 2.6447341442108154,
"learning_rate": 9.545032675245813e-05,
"loss": 4.4671,
"step": 36
},
{
"epoch": 0.06068060680606806,
"grad_norm": 2.8554253578186035,
"learning_rate": 9.509956150664796e-05,
"loss": 4.7183,
"step": 37
},
{
"epoch": 0.062320623206232065,
"grad_norm": 2.5365495681762695,
"learning_rate": 9.473646649103818e-05,
"loss": 4.4201,
"step": 38
},
{
"epoch": 0.06396063960639606,
"grad_norm": 2.7254433631896973,
"learning_rate": 9.43611409721806e-05,
"loss": 4.6132,
"step": 39
},
{
"epoch": 0.06560065600656007,
"grad_norm": 2.792360305786133,
"learning_rate": 9.397368756032445e-05,
"loss": 4.1543,
"step": 40
},
{
"epoch": 0.06724067240672407,
"grad_norm": 2.74570894241333,
"learning_rate": 9.357421218136386e-05,
"loss": 4.6709,
"step": 41
},
{
"epoch": 0.06888068880688807,
"grad_norm": 2.982025623321533,
"learning_rate": 9.316282404787871e-05,
"loss": 4.287,
"step": 42
},
{
"epoch": 0.07052070520705207,
"grad_norm": 2.8786568641662598,
"learning_rate": 9.273963562927695e-05,
"loss": 4.5806,
"step": 43
},
{
"epoch": 0.07216072160721607,
"grad_norm": 2.9633281230926514,
"learning_rate": 9.230476262104677e-05,
"loss": 4.6256,
"step": 44
},
{
"epoch": 0.07380073800738007,
"grad_norm": 3.1246445178985596,
"learning_rate": 9.185832391312644e-05,
"loss": 4.7461,
"step": 45
},
{
"epoch": 0.07544075440754408,
"grad_norm": 2.9915554523468018,
"learning_rate": 9.140044155740101e-05,
"loss": 4.4783,
"step": 46
},
{
"epoch": 0.07708077080770807,
"grad_norm": 3.1274242401123047,
"learning_rate": 9.093124073433463e-05,
"loss": 4.3616,
"step": 47
},
{
"epoch": 0.07872078720787208,
"grad_norm": 3.342928409576416,
"learning_rate": 9.045084971874738e-05,
"loss": 4.7315,
"step": 48
},
{
"epoch": 0.08036080360803607,
"grad_norm": 3.4278533458709717,
"learning_rate": 8.995939984474624e-05,
"loss": 4.5714,
"step": 49
},
{
"epoch": 0.08200082000820008,
"grad_norm": 3.902034044265747,
"learning_rate": 8.945702546981969e-05,
"loss": 4.1875,
"step": 50
},
{
"epoch": 0.08200082000820008,
"eval_loss": 1.060987949371338,
"eval_runtime": 241.7631,
"eval_samples_per_second": 4.248,
"eval_steps_per_second": 1.063,
"step": 50
},
{
"epoch": 0.08364083640836409,
"grad_norm": 3.471196413040161,
"learning_rate": 8.894386393810563e-05,
"loss": 3.7904,
"step": 51
},
{
"epoch": 0.08528085280852808,
"grad_norm": 2.6738178730010986,
"learning_rate": 8.842005554284296e-05,
"loss": 3.9662,
"step": 52
},
{
"epoch": 0.08692086920869209,
"grad_norm": 2.6069202423095703,
"learning_rate": 8.788574348801675e-05,
"loss": 3.7654,
"step": 53
},
{
"epoch": 0.08856088560885608,
"grad_norm": 2.868619203567505,
"learning_rate": 8.73410738492077e-05,
"loss": 3.7073,
"step": 54
},
{
"epoch": 0.09020090200902009,
"grad_norm": 2.7494771480560303,
"learning_rate": 8.678619553365659e-05,
"loss": 3.884,
"step": 55
},
{
"epoch": 0.0918409184091841,
"grad_norm": 2.433377981185913,
"learning_rate": 8.622126023955446e-05,
"loss": 3.8217,
"step": 56
},
{
"epoch": 0.09348093480934809,
"grad_norm": 2.4099724292755127,
"learning_rate": 8.564642241456986e-05,
"loss": 3.6413,
"step": 57
},
{
"epoch": 0.0951209512095121,
"grad_norm": 2.463087320327759,
"learning_rate": 8.506183921362443e-05,
"loss": 3.7503,
"step": 58
},
{
"epoch": 0.09676096760967609,
"grad_norm": 2.5593416690826416,
"learning_rate": 8.44676704559283e-05,
"loss": 3.7841,
"step": 59
},
{
"epoch": 0.0984009840098401,
"grad_norm": 2.4289839267730713,
"learning_rate": 8.386407858128706e-05,
"loss": 4.2243,
"step": 60
},
{
"epoch": 0.1000410004100041,
"grad_norm": 2.4738149642944336,
"learning_rate": 8.32512286056924e-05,
"loss": 4.0646,
"step": 61
},
{
"epoch": 0.1016810168101681,
"grad_norm": 2.3960394859313965,
"learning_rate": 8.262928807620843e-05,
"loss": 4.1694,
"step": 62
},
{
"epoch": 0.1033210332103321,
"grad_norm": 2.4949097633361816,
"learning_rate": 8.199842702516583e-05,
"loss": 4.7137,
"step": 63
},
{
"epoch": 0.1049610496104961,
"grad_norm": 2.4538803100585938,
"learning_rate": 8.135881792367686e-05,
"loss": 4.1133,
"step": 64
},
{
"epoch": 0.1066010660106601,
"grad_norm": 2.405606746673584,
"learning_rate": 8.07106356344834e-05,
"loss": 4.506,
"step": 65
},
{
"epoch": 0.10824108241082411,
"grad_norm": 2.4630465507507324,
"learning_rate": 8.005405736415126e-05,
"loss": 4.0459,
"step": 66
},
{
"epoch": 0.1098810988109881,
"grad_norm": 2.4578747749328613,
"learning_rate": 7.938926261462366e-05,
"loss": 4.574,
"step": 67
},
{
"epoch": 0.11152111521115211,
"grad_norm": 2.3418519496917725,
"learning_rate": 7.871643313414718e-05,
"loss": 3.7993,
"step": 68
},
{
"epoch": 0.11316113161131611,
"grad_norm": 2.3230831623077393,
"learning_rate": 7.803575286758364e-05,
"loss": 4.1788,
"step": 69
},
{
"epoch": 0.11480114801148011,
"grad_norm": 2.371260404586792,
"learning_rate": 7.734740790612136e-05,
"loss": 4.263,
"step": 70
},
{
"epoch": 0.11644116441164412,
"grad_norm": 2.292912244796753,
"learning_rate": 7.66515864363997e-05,
"loss": 4.1595,
"step": 71
},
{
"epoch": 0.11808118081180811,
"grad_norm": 2.3439486026763916,
"learning_rate": 7.594847868906076e-05,
"loss": 4.416,
"step": 72
},
{
"epoch": 0.11972119721197212,
"grad_norm": 2.366844654083252,
"learning_rate": 7.52382768867422e-05,
"loss": 4.1396,
"step": 73
},
{
"epoch": 0.12136121361213612,
"grad_norm": 2.4201889038085938,
"learning_rate": 7.452117519152542e-05,
"loss": 3.9721,
"step": 74
},
{
"epoch": 0.12300123001230012,
"grad_norm": 2.362065076828003,
"learning_rate": 7.379736965185368e-05,
"loss": 4.477,
"step": 75
},
{
"epoch": 0.12464124641246413,
"grad_norm": 2.4092745780944824,
"learning_rate": 7.30670581489344e-05,
"loss": 4.6119,
"step": 76
},
{
"epoch": 0.12628126281262814,
"grad_norm": 2.300295352935791,
"learning_rate": 7.233044034264034e-05,
"loss": 4.596,
"step": 77
},
{
"epoch": 0.12792127921279212,
"grad_norm": 2.3461453914642334,
"learning_rate": 7.158771761692464e-05,
"loss": 4.741,
"step": 78
},
{
"epoch": 0.12956129561295612,
"grad_norm": 2.3217592239379883,
"learning_rate": 7.083909302476453e-05,
"loss": 4.1441,
"step": 79
},
{
"epoch": 0.13120131201312013,
"grad_norm": 2.353808641433716,
"learning_rate": 7.008477123264848e-05,
"loss": 4.0813,
"step": 80
},
{
"epoch": 0.13284132841328414,
"grad_norm": 2.4136617183685303,
"learning_rate": 6.932495846462261e-05,
"loss": 4.4561,
"step": 81
},
{
"epoch": 0.13448134481344814,
"grad_norm": 2.305370330810547,
"learning_rate": 6.855986244591104e-05,
"loss": 4.4407,
"step": 82
},
{
"epoch": 0.13612136121361212,
"grad_norm": 2.3449130058288574,
"learning_rate": 6.778969234612584e-05,
"loss": 4.4233,
"step": 83
},
{
"epoch": 0.13776137761377613,
"grad_norm": 2.3670763969421387,
"learning_rate": 6.701465872208216e-05,
"loss": 4.5521,
"step": 84
},
{
"epoch": 0.13940139401394014,
"grad_norm": 2.303210496902466,
"learning_rate": 6.623497346023418e-05,
"loss": 4.158,
"step": 85
},
{
"epoch": 0.14104141041410415,
"grad_norm": 2.2928717136383057,
"learning_rate": 6.545084971874738e-05,
"loss": 4.4811,
"step": 86
},
{
"epoch": 0.14268142681426815,
"grad_norm": 2.3217215538024902,
"learning_rate": 6.466250186922325e-05,
"loss": 4.4757,
"step": 87
},
{
"epoch": 0.14432144321443213,
"grad_norm": 2.4649085998535156,
"learning_rate": 6.387014543809223e-05,
"loss": 4.3929,
"step": 88
},
{
"epoch": 0.14596145961459614,
"grad_norm": 2.4416439533233643,
"learning_rate": 6.307399704769099e-05,
"loss": 4.3651,
"step": 89
},
{
"epoch": 0.14760147601476015,
"grad_norm": 2.407564401626587,
"learning_rate": 6.227427435703997e-05,
"loss": 4.5105,
"step": 90
},
{
"epoch": 0.14924149241492415,
"grad_norm": 2.5387256145477295,
"learning_rate": 6.147119600233758e-05,
"loss": 4.3569,
"step": 91
},
{
"epoch": 0.15088150881508816,
"grad_norm": 2.485275983810425,
"learning_rate": 6.066498153718735e-05,
"loss": 4.1993,
"step": 92
},
{
"epoch": 0.15252152521525214,
"grad_norm": 2.6093876361846924,
"learning_rate": 5.985585137257401e-05,
"loss": 4.4529,
"step": 93
},
{
"epoch": 0.15416154161541615,
"grad_norm": 2.7150611877441406,
"learning_rate": 5.90440267166055e-05,
"loss": 4.298,
"step": 94
},
{
"epoch": 0.15580155801558015,
"grad_norm": 2.773834466934204,
"learning_rate": 5.8229729514036705e-05,
"loss": 4.5244,
"step": 95
},
{
"epoch": 0.15744157441574416,
"grad_norm": 2.696632146835327,
"learning_rate": 5.74131823855921e-05,
"loss": 4.2331,
"step": 96
},
{
"epoch": 0.15908159081590817,
"grad_norm": 2.822608470916748,
"learning_rate": 5.6594608567103456e-05,
"loss": 3.9903,
"step": 97
},
{
"epoch": 0.16072160721607215,
"grad_norm": 3.0407216548919678,
"learning_rate": 5.577423184847932e-05,
"loss": 4.4542,
"step": 98
},
{
"epoch": 0.16236162361623616,
"grad_norm": 3.1217188835144043,
"learning_rate": 5.495227651252315e-05,
"loss": 4.8318,
"step": 99
},
{
"epoch": 0.16400164001640016,
"grad_norm": 3.499589681625366,
"learning_rate": 5.4128967273616625e-05,
"loss": 3.9586,
"step": 100
},
{
"epoch": 0.16400164001640016,
"eval_loss": 1.0330122709274292,
"eval_runtime": 241.293,
"eval_samples_per_second": 4.256,
"eval_steps_per_second": 1.065,
"step": 100
},
{
"epoch": 0.16564165641656417,
"grad_norm": 2.629485607147217,
"learning_rate": 5.330452921628497e-05,
"loss": 3.1455,
"step": 101
},
{
"epoch": 0.16728167281672818,
"grad_norm": 2.4439175128936768,
"learning_rate": 5.247918773366112e-05,
"loss": 3.4364,
"step": 102
},
{
"epoch": 0.16892168921689216,
"grad_norm": 2.5433027744293213,
"learning_rate": 5.165316846586541e-05,
"loss": 4.0761,
"step": 103
},
{
"epoch": 0.17056170561705616,
"grad_norm": 2.309931516647339,
"learning_rate": 5.0826697238317935e-05,
"loss": 3.6827,
"step": 104
},
{
"epoch": 0.17220172201722017,
"grad_norm": 2.292348861694336,
"learning_rate": 5e-05,
"loss": 4.0615,
"step": 105
},
{
"epoch": 0.17384173841738418,
"grad_norm": 2.323428153991699,
"learning_rate": 4.917330276168208e-05,
"loss": 3.6907,
"step": 106
},
{
"epoch": 0.17548175481754819,
"grad_norm": 2.3306071758270264,
"learning_rate": 4.834683153413459e-05,
"loss": 3.7316,
"step": 107
},
{
"epoch": 0.17712177121771217,
"grad_norm": 2.2622241973876953,
"learning_rate": 4.7520812266338885e-05,
"loss": 3.8164,
"step": 108
},
{
"epoch": 0.17876178761787617,
"grad_norm": 2.264270305633545,
"learning_rate": 4.669547078371504e-05,
"loss": 4.2311,
"step": 109
},
{
"epoch": 0.18040180401804018,
"grad_norm": 2.2461841106414795,
"learning_rate": 4.5871032726383386e-05,
"loss": 3.5878,
"step": 110
},
{
"epoch": 0.1820418204182042,
"grad_norm": 2.299285888671875,
"learning_rate": 4.504772348747687e-05,
"loss": 3.7959,
"step": 111
},
{
"epoch": 0.1836818368183682,
"grad_norm": 2.302234411239624,
"learning_rate": 4.4225768151520694e-05,
"loss": 3.749,
"step": 112
},
{
"epoch": 0.18532185321853217,
"grad_norm": 2.314466714859009,
"learning_rate": 4.3405391432896555e-05,
"loss": 3.575,
"step": 113
},
{
"epoch": 0.18696186961869618,
"grad_norm": 2.3300201892852783,
"learning_rate": 4.2586817614407895e-05,
"loss": 3.8614,
"step": 114
},
{
"epoch": 0.1886018860188602,
"grad_norm": 2.199928045272827,
"learning_rate": 4.17702704859633e-05,
"loss": 4.1803,
"step": 115
},
{
"epoch": 0.1902419024190242,
"grad_norm": 2.3204433917999268,
"learning_rate": 4.095597328339452e-05,
"loss": 4.1178,
"step": 116
},
{
"epoch": 0.1918819188191882,
"grad_norm": 2.221572160720825,
"learning_rate": 4.0144148627425993e-05,
"loss": 3.9985,
"step": 117
},
{
"epoch": 0.19352193521935218,
"grad_norm": 2.2999792098999023,
"learning_rate": 3.933501846281267e-05,
"loss": 4.2054,
"step": 118
},
{
"epoch": 0.1951619516195162,
"grad_norm": 2.226060390472412,
"learning_rate": 3.852880399766243e-05,
"loss": 4.4533,
"step": 119
},
{
"epoch": 0.1968019680196802,
"grad_norm": 2.199904441833496,
"learning_rate": 3.772572564296005e-05,
"loss": 4.3227,
"step": 120
},
{
"epoch": 0.1984419844198442,
"grad_norm": 2.226961374282837,
"learning_rate": 3.6926002952309016e-05,
"loss": 4.3761,
"step": 121
},
{
"epoch": 0.2000820008200082,
"grad_norm": 2.268184185028076,
"learning_rate": 3.612985456190778e-05,
"loss": 4.2865,
"step": 122
},
{
"epoch": 0.2017220172201722,
"grad_norm": 2.24127197265625,
"learning_rate": 3.533749813077677e-05,
"loss": 4.0891,
"step": 123
},
{
"epoch": 0.2033620336203362,
"grad_norm": 2.2995402812957764,
"learning_rate": 3.4549150281252636e-05,
"loss": 4.5376,
"step": 124
},
{
"epoch": 0.2050020500205002,
"grad_norm": 2.2306911945343018,
"learning_rate": 3.3765026539765834e-05,
"loss": 3.9252,
"step": 125
},
{
"epoch": 0.2066420664206642,
"grad_norm": 2.324394464492798,
"learning_rate": 3.298534127791785e-05,
"loss": 4.1393,
"step": 126
},
{
"epoch": 0.20828208282082822,
"grad_norm": 2.341024875640869,
"learning_rate": 3.221030765387417e-05,
"loss": 4.2623,
"step": 127
},
{
"epoch": 0.2099220992209922,
"grad_norm": 2.2914934158325195,
"learning_rate": 3.144013755408895e-05,
"loss": 4.1272,
"step": 128
},
{
"epoch": 0.2115621156211562,
"grad_norm": 2.265011787414551,
"learning_rate": 3.0675041535377405e-05,
"loss": 4.7511,
"step": 129
},
{
"epoch": 0.2132021320213202,
"grad_norm": 2.2810287475585938,
"learning_rate": 2.991522876735154e-05,
"loss": 3.9507,
"step": 130
},
{
"epoch": 0.21484214842148422,
"grad_norm": 2.209026336669922,
"learning_rate": 2.916090697523549e-05,
"loss": 4.1971,
"step": 131
},
{
"epoch": 0.21648216482164823,
"grad_norm": 2.271674871444702,
"learning_rate": 2.8412282383075363e-05,
"loss": 4.2369,
"step": 132
},
{
"epoch": 0.2181221812218122,
"grad_norm": 2.2816481590270996,
"learning_rate": 2.766955965735968e-05,
"loss": 4.4193,
"step": 133
},
{
"epoch": 0.2197621976219762,
"grad_norm": 2.369513511657715,
"learning_rate": 2.693294185106562e-05,
"loss": 4.2053,
"step": 134
},
{
"epoch": 0.22140221402214022,
"grad_norm": 2.2750861644744873,
"learning_rate": 2.6202630348146324e-05,
"loss": 4.4168,
"step": 135
},
{
"epoch": 0.22304223042230423,
"grad_norm": 2.3277220726013184,
"learning_rate": 2.547882480847461e-05,
"loss": 4.5227,
"step": 136
},
{
"epoch": 0.22468224682246823,
"grad_norm": 2.4248850345611572,
"learning_rate": 2.476172311325783e-05,
"loss": 4.0977,
"step": 137
},
{
"epoch": 0.22632226322263221,
"grad_norm": 2.430215358734131,
"learning_rate": 2.405152131093926e-05,
"loss": 4.3858,
"step": 138
},
{
"epoch": 0.22796227962279622,
"grad_norm": 2.4897172451019287,
"learning_rate": 2.3348413563600325e-05,
"loss": 4.0341,
"step": 139
},
{
"epoch": 0.22960229602296023,
"grad_norm": 2.6170473098754883,
"learning_rate": 2.2652592093878666e-05,
"loss": 4.6414,
"step": 140
},
{
"epoch": 0.23124231242312424,
"grad_norm": 2.5205514430999756,
"learning_rate": 2.196424713241637e-05,
"loss": 3.8447,
"step": 141
},
{
"epoch": 0.23288232882328824,
"grad_norm": 2.59279465675354,
"learning_rate": 2.128356686585282e-05,
"loss": 3.8828,
"step": 142
},
{
"epoch": 0.23452234522345222,
"grad_norm": 2.650130033493042,
"learning_rate": 2.061073738537635e-05,
"loss": 4.1121,
"step": 143
},
{
"epoch": 0.23616236162361623,
"grad_norm": 2.6162757873535156,
"learning_rate": 1.9945942635848748e-05,
"loss": 4.2642,
"step": 144
},
{
"epoch": 0.23780237802378024,
"grad_norm": 2.8053736686706543,
"learning_rate": 1.928936436551661e-05,
"loss": 4.1126,
"step": 145
},
{
"epoch": 0.23944239442394424,
"grad_norm": 2.6850321292877197,
"learning_rate": 1.8641182076323148e-05,
"loss": 4.5236,
"step": 146
},
{
"epoch": 0.24108241082410825,
"grad_norm": 2.9415037631988525,
"learning_rate": 1.800157297483417e-05,
"loss": 4.5156,
"step": 147
},
{
"epoch": 0.24272242722427223,
"grad_norm": 2.9442765712738037,
"learning_rate": 1.7370711923791567e-05,
"loss": 4.4899,
"step": 148
},
{
"epoch": 0.24436244362443624,
"grad_norm": 3.2887613773345947,
"learning_rate": 1.6748771394307585e-05,
"loss": 4.3883,
"step": 149
},
{
"epoch": 0.24600246002460024,
"grad_norm": 3.433468818664551,
"learning_rate": 1.6135921418712956e-05,
"loss": 4.0055,
"step": 150
},
{
"epoch": 0.24600246002460024,
"eval_loss": 1.0182451009750366,
"eval_runtime": 241.7957,
"eval_samples_per_second": 4.247,
"eval_steps_per_second": 1.063,
"step": 150
},
{
"epoch": 0.24764247642476425,
"grad_norm": 2.4129600524902344,
"learning_rate": 1.553232954407171e-05,
"loss": 3.7138,
"step": 151
},
{
"epoch": 0.24928249282492826,
"grad_norm": 2.38737154006958,
"learning_rate": 1.4938160786375572e-05,
"loss": 3.3665,
"step": 152
},
{
"epoch": 0.25092250922509224,
"grad_norm": 2.384030818939209,
"learning_rate": 1.435357758543015e-05,
"loss": 3.6905,
"step": 153
},
{
"epoch": 0.2525625256252563,
"grad_norm": 2.2609331607818604,
"learning_rate": 1.3778739760445552e-05,
"loss": 3.612,
"step": 154
},
{
"epoch": 0.25420254202542025,
"grad_norm": 2.3165335655212402,
"learning_rate": 1.3213804466343421e-05,
"loss": 4.0229,
"step": 155
},
{
"epoch": 0.25584255842558423,
"grad_norm": 2.305833339691162,
"learning_rate": 1.2658926150792322e-05,
"loss": 3.6824,
"step": 156
},
{
"epoch": 0.25748257482574827,
"grad_norm": 2.259158134460449,
"learning_rate": 1.2114256511983274e-05,
"loss": 3.9486,
"step": 157
},
{
"epoch": 0.25912259122591225,
"grad_norm": 2.233785390853882,
"learning_rate": 1.157994445715706e-05,
"loss": 3.3779,
"step": 158
},
{
"epoch": 0.2607626076260763,
"grad_norm": 2.2110209465026855,
"learning_rate": 1.1056136061894384e-05,
"loss": 3.5337,
"step": 159
},
{
"epoch": 0.26240262402624026,
"grad_norm": 2.3029186725616455,
"learning_rate": 1.0542974530180327e-05,
"loss": 4.0395,
"step": 160
},
{
"epoch": 0.26404264042640424,
"grad_norm": 2.217092514038086,
"learning_rate": 1.0040600155253765e-05,
"loss": 3.7837,
"step": 161
},
{
"epoch": 0.2656826568265683,
"grad_norm": 2.242913007736206,
"learning_rate": 9.549150281252633e-06,
"loss": 4.2022,
"step": 162
},
{
"epoch": 0.26732267322673225,
"grad_norm": 2.2055246829986572,
"learning_rate": 9.068759265665384e-06,
"loss": 3.9344,
"step": 163
},
{
"epoch": 0.2689626896268963,
"grad_norm": 2.2303857803344727,
"learning_rate": 8.599558442598998e-06,
"loss": 3.7108,
"step": 164
},
{
"epoch": 0.27060270602706027,
"grad_norm": 2.276827335357666,
"learning_rate": 8.141676086873572e-06,
"loss": 4.2158,
"step": 165
},
{
"epoch": 0.27224272242722425,
"grad_norm": 2.2376182079315186,
"learning_rate": 7.695237378953223e-06,
"loss": 3.8198,
"step": 166
},
{
"epoch": 0.2738827388273883,
"grad_norm": 2.158167600631714,
"learning_rate": 7.260364370723044e-06,
"loss": 3.8707,
"step": 167
},
{
"epoch": 0.27552275522755226,
"grad_norm": 2.253467321395874,
"learning_rate": 6.837175952121306e-06,
"loss": 4.2192,
"step": 168
},
{
"epoch": 0.2771627716277163,
"grad_norm": 2.3017327785491943,
"learning_rate": 6.425787818636131e-06,
"loss": 4.2194,
"step": 169
},
{
"epoch": 0.2788027880278803,
"grad_norm": 2.281419038772583,
"learning_rate": 6.026312439675552e-06,
"loss": 4.1228,
"step": 170
},
{
"epoch": 0.28044280442804426,
"grad_norm": 2.24662709236145,
"learning_rate": 5.6388590278194096e-06,
"loss": 4.1829,
"step": 171
},
{
"epoch": 0.2820828208282083,
"grad_norm": 2.320794105529785,
"learning_rate": 5.263533508961827e-06,
"loss": 4.03,
"step": 172
},
{
"epoch": 0.28372283722837227,
"grad_norm": 2.2237203121185303,
"learning_rate": 4.900438493352055e-06,
"loss": 3.4464,
"step": 173
},
{
"epoch": 0.2853628536285363,
"grad_norm": 2.324625253677368,
"learning_rate": 4.549673247541875e-06,
"loss": 5.0423,
"step": 174
},
{
"epoch": 0.2870028700287003,
"grad_norm": 2.2332985401153564,
"learning_rate": 4.2113336672471245e-06,
"loss": 3.878,
"step": 175
},
{
"epoch": 0.28864288642886426,
"grad_norm": 2.308011531829834,
"learning_rate": 3.885512251130763e-06,
"loss": 4.1407,
"step": 176
},
{
"epoch": 0.2902829028290283,
"grad_norm": 2.244994640350342,
"learning_rate": 3.5722980755146517e-06,
"loss": 3.9916,
"step": 177
},
{
"epoch": 0.2919229192291923,
"grad_norm": 2.2235755920410156,
"learning_rate": 3.271776770026963e-06,
"loss": 4.0572,
"step": 178
},
{
"epoch": 0.2935629356293563,
"grad_norm": 2.230010986328125,
"learning_rate": 2.9840304941919415e-06,
"loss": 4.254,
"step": 179
},
{
"epoch": 0.2952029520295203,
"grad_norm": 2.25651216506958,
"learning_rate": 2.7091379149682685e-06,
"loss": 4.3738,
"step": 180
},
{
"epoch": 0.2968429684296843,
"grad_norm": 2.3242099285125732,
"learning_rate": 2.4471741852423237e-06,
"loss": 4.3826,
"step": 181
},
{
"epoch": 0.2984829848298483,
"grad_norm": 2.2767388820648193,
"learning_rate": 2.1982109232821178e-06,
"loss": 4.4344,
"step": 182
},
{
"epoch": 0.3001230012300123,
"grad_norm": 2.244988441467285,
"learning_rate": 1.962316193157593e-06,
"loss": 4.2265,
"step": 183
},
{
"epoch": 0.3017630176301763,
"grad_norm": 2.329388380050659,
"learning_rate": 1.7395544861325718e-06,
"loss": 4.3677,
"step": 184
},
{
"epoch": 0.3034030340303403,
"grad_norm": 2.2560641765594482,
"learning_rate": 1.5299867030334814e-06,
"loss": 4.3038,
"step": 185
},
{
"epoch": 0.3050430504305043,
"grad_norm": 2.335202932357788,
"learning_rate": 1.333670137599713e-06,
"loss": 4.618,
"step": 186
},
{
"epoch": 0.3066830668306683,
"grad_norm": 2.324882984161377,
"learning_rate": 1.1506584608200367e-06,
"loss": 3.9209,
"step": 187
},
{
"epoch": 0.3083230832308323,
"grad_norm": 2.4008238315582275,
"learning_rate": 9.810017062595322e-07,
"loss": 4.369,
"step": 188
},
{
"epoch": 0.30996309963099633,
"grad_norm": 2.385094165802002,
"learning_rate": 8.247462563808817e-07,
"loss": 4.5269,
"step": 189
},
{
"epoch": 0.3116031160311603,
"grad_norm": 2.500152587890625,
"learning_rate": 6.819348298638839e-07,
"loss": 4.3647,
"step": 190
},
{
"epoch": 0.3132431324313243,
"grad_norm": 2.5596444606781006,
"learning_rate": 5.526064699265753e-07,
"loss": 4.3113,
"step": 191
},
{
"epoch": 0.3148831488314883,
"grad_norm": 2.6978306770324707,
"learning_rate": 4.367965336512403e-07,
"loss": 4.5529,
"step": 192
},
{
"epoch": 0.3165231652316523,
"grad_norm": 2.485746383666992,
"learning_rate": 3.3453668231809286e-07,
"loss": 3.9753,
"step": 193
},
{
"epoch": 0.31816318163181634,
"grad_norm": 2.694565534591675,
"learning_rate": 2.458548727494292e-07,
"loss": 4.218,
"step": 194
},
{
"epoch": 0.3198031980319803,
"grad_norm": 2.7190346717834473,
"learning_rate": 1.7077534966650766e-07,
"loss": 4.4973,
"step": 195
},
{
"epoch": 0.3214432144321443,
"grad_norm": 2.875120162963867,
"learning_rate": 1.0931863906127327e-07,
"loss": 4.6409,
"step": 196
},
{
"epoch": 0.32308323083230833,
"grad_norm": 3.024665355682373,
"learning_rate": 6.150154258476315e-08,
"loss": 4.6979,
"step": 197
},
{
"epoch": 0.3247232472324723,
"grad_norm": 3.1800918579101562,
"learning_rate": 2.7337132953697554e-08,
"loss": 4.2454,
"step": 198
},
{
"epoch": 0.32636326363263635,
"grad_norm": 3.309856653213501,
"learning_rate": 6.834750376549792e-09,
"loss": 4.0409,
"step": 199
},
{
"epoch": 0.3280032800328003,
"grad_norm": 3.703190326690674,
"learning_rate": 0.0,
"loss": 4.1479,
"step": 200
},
{
"epoch": 0.3280032800328003,
"eval_loss": 1.01239013671875,
"eval_runtime": 241.0405,
"eval_samples_per_second": 4.261,
"eval_steps_per_second": 1.066,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.552272274872074e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}