{
"best_metric": 2.5794575214385986,
"best_model_checkpoint": "miner_id_24/checkpoint-100",
"epoch": 3.017543859649123,
"eval_steps": 50,
"global_step": 129,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.023391812865497075,
"grad_norm": 2.8899965286254883,
"learning_rate": 1e-05,
"loss": 3.621,
"step": 1
},
{
"epoch": 0.023391812865497075,
"eval_loss": 4.880038738250732,
"eval_runtime": 3.2301,
"eval_samples_per_second": 22.291,
"eval_steps_per_second": 5.573,
"step": 1
},
{
"epoch": 0.04678362573099415,
"grad_norm": 3.9275195598602295,
"learning_rate": 2e-05,
"loss": 3.9065,
"step": 2
},
{
"epoch": 0.07017543859649122,
"grad_norm": 5.344070911407471,
"learning_rate": 3e-05,
"loss": 4.0645,
"step": 3
},
{
"epoch": 0.0935672514619883,
"grad_norm": 4.742477893829346,
"learning_rate": 4e-05,
"loss": 3.9164,
"step": 4
},
{
"epoch": 0.11695906432748537,
"grad_norm": 4.960448265075684,
"learning_rate": 5e-05,
"loss": 4.1087,
"step": 5
},
{
"epoch": 0.14035087719298245,
"grad_norm": 5.740211009979248,
"learning_rate": 6e-05,
"loss": 3.9335,
"step": 6
},
{
"epoch": 0.16374269005847952,
"grad_norm": 7.367751598358154,
"learning_rate": 7e-05,
"loss": 3.8536,
"step": 7
},
{
"epoch": 0.1871345029239766,
"grad_norm": 6.333174228668213,
"learning_rate": 8e-05,
"loss": 3.8627,
"step": 8
},
{
"epoch": 0.21052631578947367,
"grad_norm": 7.679598808288574,
"learning_rate": 9e-05,
"loss": 3.4869,
"step": 9
},
{
"epoch": 0.23391812865497075,
"grad_norm": 8.830903053283691,
"learning_rate": 0.0001,
"loss": 3.8319,
"step": 10
},
{
"epoch": 0.2573099415204678,
"grad_norm": 5.146438121795654,
"learning_rate": 9.998257709344246e-05,
"loss": 3.4504,
"step": 11
},
{
"epoch": 0.2807017543859649,
"grad_norm": 3.7169127464294434,
"learning_rate": 9.993032051607669e-05,
"loss": 3.3142,
"step": 12
},
{
"epoch": 0.30409356725146197,
"grad_norm": 3.373077869415283,
"learning_rate": 9.984326668636131e-05,
"loss": 3.1032,
"step": 13
},
{
"epoch": 0.32748538011695905,
"grad_norm": 3.1468732357025146,
"learning_rate": 9.972147627352592e-05,
"loss": 3.0006,
"step": 14
},
{
"epoch": 0.3508771929824561,
"grad_norm": 3.2604331970214844,
"learning_rate": 9.956503415528984e-05,
"loss": 3.1792,
"step": 15
},
{
"epoch": 0.3742690058479532,
"grad_norm": 3.397855758666992,
"learning_rate": 9.937404935870938e-05,
"loss": 3.0764,
"step": 16
},
{
"epoch": 0.39766081871345027,
"grad_norm": 3.596156120300293,
"learning_rate": 9.91486549841951e-05,
"loss": 2.7991,
"step": 17
},
{
"epoch": 0.42105263157894735,
"grad_norm": 4.534525394439697,
"learning_rate": 9.888900811275204e-05,
"loss": 2.8614,
"step": 18
},
{
"epoch": 0.4444444444444444,
"grad_norm": 5.425872802734375,
"learning_rate": 9.859528969650738e-05,
"loss": 2.9683,
"step": 19
},
{
"epoch": 0.4678362573099415,
"grad_norm": 10.605550765991211,
"learning_rate": 9.826770443260193e-05,
"loss": 3.8376,
"step": 20
},
{
"epoch": 0.49122807017543857,
"grad_norm": 3.0964808464050293,
"learning_rate": 9.79064806205334e-05,
"loss": 3.1105,
"step": 21
},
{
"epoch": 0.5146198830409356,
"grad_norm": 2.7125508785247803,
"learning_rate": 9.751187000305076e-05,
"loss": 2.934,
"step": 22
},
{
"epoch": 0.5380116959064327,
"grad_norm": 2.679877758026123,
"learning_rate": 9.708414759071059e-05,
"loss": 2.8647,
"step": 23
},
{
"epoch": 0.5614035087719298,
"grad_norm": 2.6572399139404297,
"learning_rate": 9.662361147021779e-05,
"loss": 2.7279,
"step": 24
},
{
"epoch": 0.5847953216374269,
"grad_norm": 2.8284733295440674,
"learning_rate": 9.613058259668416e-05,
"loss": 2.7198,
"step": 25
},
{
"epoch": 0.6081871345029239,
"grad_norm": 3.222668409347534,
"learning_rate": 9.56054045699494e-05,
"loss": 2.9284,
"step": 26
},
{
"epoch": 0.631578947368421,
"grad_norm": 3.3537814617156982,
"learning_rate": 9.504844339512095e-05,
"loss": 2.6431,
"step": 27
},
{
"epoch": 0.6549707602339181,
"grad_norm": 3.6838319301605225,
"learning_rate": 9.446008722749905e-05,
"loss": 2.7465,
"step": 28
},
{
"epoch": 0.6783625730994152,
"grad_norm": 4.8847432136535645,
"learning_rate": 9.384074610206495e-05,
"loss": 2.8716,
"step": 29
},
{
"epoch": 0.7017543859649122,
"grad_norm": 6.4051103591918945,
"learning_rate": 9.319085164772082e-05,
"loss": 2.7997,
"step": 30
},
{
"epoch": 0.7251461988304093,
"grad_norm": 2.051379680633545,
"learning_rate": 9.251085678648072e-05,
"loss": 2.8797,
"step": 31
},
{
"epoch": 0.7485380116959064,
"grad_norm": 2.1589481830596924,
"learning_rate": 9.180123541782171e-05,
"loss": 2.5857,
"step": 32
},
{
"epoch": 0.7719298245614035,
"grad_norm": 2.3289027214050293,
"learning_rate": 9.106248208841569e-05,
"loss": 2.5558,
"step": 33
},
{
"epoch": 0.7953216374269005,
"grad_norm": 2.580897569656372,
"learning_rate": 9.029511164747175e-05,
"loss": 2.5283,
"step": 34
},
{
"epoch": 0.8187134502923976,
"grad_norm": 2.79526948928833,
"learning_rate": 8.949965888792941e-05,
"loss": 2.4669,
"step": 35
},
{
"epoch": 0.8421052631578947,
"grad_norm": 2.9586293697357178,
"learning_rate": 8.867667817375266e-05,
"loss": 2.4342,
"step": 36
},
{
"epoch": 0.8654970760233918,
"grad_norm": 3.565655469894409,
"learning_rate": 8.78267430535848e-05,
"loss": 2.5569,
"step": 37
},
{
"epoch": 0.8888888888888888,
"grad_norm": 3.735215425491333,
"learning_rate": 8.695044586103296e-05,
"loss": 2.6734,
"step": 38
},
{
"epoch": 0.9122807017543859,
"grad_norm": 4.107497692108154,
"learning_rate": 8.604839730186125e-05,
"loss": 2.3497,
"step": 39
},
{
"epoch": 0.935672514619883,
"grad_norm": 6.719999313354492,
"learning_rate": 8.512122602837993e-05,
"loss": 2.793,
"step": 40
},
{
"epoch": 0.9590643274853801,
"grad_norm": 1.973287582397461,
"learning_rate": 8.416957820132742e-05,
"loss": 2.8444,
"step": 41
},
{
"epoch": 0.9824561403508771,
"grad_norm": 2.6278090476989746,
"learning_rate": 8.319411703955042e-05,
"loss": 2.3696,
"step": 42
},
{
"epoch": 1.0058479532163742,
"grad_norm": 4.79380464553833,
"learning_rate": 8.219552235779578e-05,
"loss": 3.6136,
"step": 43
},
{
"epoch": 1.0292397660818713,
"grad_norm": 1.4787054061889648,
"learning_rate": 8.117449009293668e-05,
"loss": 2.1233,
"step": 44
},
{
"epoch": 1.0526315789473684,
"grad_norm": 1.8972117900848389,
"learning_rate": 8.013173181896283e-05,
"loss": 2.4294,
"step": 45
},
{
"epoch": 1.0760233918128654,
"grad_norm": 2.160202980041504,
"learning_rate": 7.9067974251073e-05,
"loss": 2.2788,
"step": 46
},
{
"epoch": 1.0994152046783625,
"grad_norm": 2.476989269256592,
"learning_rate": 7.79839587392154e-05,
"loss": 2.1933,
"step": 47
},
{
"epoch": 1.1228070175438596,
"grad_norm": 2.577780246734619,
"learning_rate": 7.688044075142887e-05,
"loss": 1.8599,
"step": 48
},
{
"epoch": 1.1461988304093567,
"grad_norm": 3.1708245277404785,
"learning_rate": 7.57581893473448e-05,
"loss": 1.9908,
"step": 49
},
{
"epoch": 1.1695906432748537,
"grad_norm": 3.2842891216278076,
"learning_rate": 7.461798664221711e-05,
"loss": 1.838,
"step": 50
},
{
"epoch": 1.1695906432748537,
"eval_loss": 2.667975425720215,
"eval_runtime": 3.2693,
"eval_samples_per_second": 22.023,
"eval_steps_per_second": 5.506,
"step": 50
},
{
"epoch": 1.1929824561403508,
"grad_norm": 3.982672691345215,
"learning_rate": 7.346062726185332e-05,
"loss": 1.9851,
"step": 51
},
{
"epoch": 1.2163742690058479,
"grad_norm": 4.517981052398682,
"learning_rate": 7.228691778882693e-05,
"loss": 1.7932,
"step": 52
},
{
"epoch": 1.239766081871345,
"grad_norm": 4.8002166748046875,
"learning_rate": 7.109767620035689e-05,
"loss": 2.3657,
"step": 53
},
{
"epoch": 1.263157894736842,
"grad_norm": 2.333965301513672,
"learning_rate": 6.989373129824604e-05,
"loss": 2.1879,
"step": 54
},
{
"epoch": 1.286549707602339,
"grad_norm": 2.5241196155548096,
"learning_rate": 6.867592213127558e-05,
"loss": 2.2052,
"step": 55
},
{
"epoch": 1.3099415204678362,
"grad_norm": 2.5986971855163574,
"learning_rate": 6.744509741045835e-05,
"loss": 2.0875,
"step": 56
},
{
"epoch": 1.3333333333333333,
"grad_norm": 2.735356569290161,
"learning_rate": 6.62021149175583e-05,
"loss": 2.2641,
"step": 57
},
{
"epoch": 1.3567251461988303,
"grad_norm": 2.861424684524536,
"learning_rate": 6.494784090728852e-05,
"loss": 2.1462,
"step": 58
},
{
"epoch": 1.3801169590643274,
"grad_norm": 2.953516960144043,
"learning_rate": 6.368314950360415e-05,
"loss": 1.9254,
"step": 59
},
{
"epoch": 1.4035087719298245,
"grad_norm": 3.429384231567383,
"learning_rate": 6.240892209051121e-05,
"loss": 2.0428,
"step": 60
},
{
"epoch": 1.4269005847953216,
"grad_norm": 3.717503070831299,
"learning_rate": 6.112604669781572e-05,
"loss": 1.6045,
"step": 61
},
{
"epoch": 1.4502923976608186,
"grad_norm": 4.62086820602417,
"learning_rate": 5.983541738224141e-05,
"loss": 1.5816,
"step": 62
},
{
"epoch": 1.4736842105263157,
"grad_norm": 5.065677165985107,
"learning_rate": 5.853793360434687e-05,
"loss": 2.1671,
"step": 63
},
{
"epoch": 1.4970760233918128,
"grad_norm": 3.0961179733276367,
"learning_rate": 5.7234499601677026e-05,
"loss": 2.3001,
"step": 64
},
{
"epoch": 1.52046783625731,
"grad_norm": 2.8138766288757324,
"learning_rate": 5.5926023758585146e-05,
"loss": 2.272,
"step": 65
},
{
"epoch": 1.543859649122807,
"grad_norm": 2.5831410884857178,
"learning_rate": 5.4613417973165106e-05,
"loss": 1.9027,
"step": 66
},
{
"epoch": 1.5672514619883042,
"grad_norm": 3.276082754135132,
"learning_rate": 5.329759702173477e-05,
"loss": 2.3347,
"step": 67
},
{
"epoch": 1.590643274853801,
"grad_norm": 3.1773629188537598,
"learning_rate": 5.197947792131348e-05,
"loss": 1.9598,
"step": 68
},
{
"epoch": 1.6140350877192984,
"grad_norm": 3.4641201496124268,
"learning_rate": 5.0659979290537954e-05,
"loss": 1.9397,
"step": 69
},
{
"epoch": 1.6374269005847952,
"grad_norm": 3.880099058151245,
"learning_rate": 4.934002070946206e-05,
"loss": 1.8497,
"step": 70
},
{
"epoch": 1.6608187134502925,
"grad_norm": 3.90553879737854,
"learning_rate": 4.802052207868654e-05,
"loss": 1.692,
"step": 71
},
{
"epoch": 1.6842105263157894,
"grad_norm": 5.151843070983887,
"learning_rate": 4.670240297826523e-05,
"loss": 1.7849,
"step": 72
},
{
"epoch": 1.7076023391812867,
"grad_norm": 4.96315336227417,
"learning_rate": 4.5386582026834906e-05,
"loss": 1.9887,
"step": 73
},
{
"epoch": 1.7309941520467835,
"grad_norm": 1.8645200729370117,
"learning_rate": 4.407397624141487e-05,
"loss": 2.2243,
"step": 74
},
{
"epoch": 1.7543859649122808,
"grad_norm": 2.2499165534973145,
"learning_rate": 4.276550039832299e-05,
"loss": 2.24,
"step": 75
},
{
"epoch": 1.7777777777777777,
"grad_norm": 2.531435251235962,
"learning_rate": 4.146206639565312e-05,
"loss": 2.167,
"step": 76
},
{
"epoch": 1.801169590643275,
"grad_norm": 2.621704578399658,
"learning_rate": 4.01645826177586e-05,
"loss": 2.0048,
"step": 77
},
{
"epoch": 1.8245614035087718,
"grad_norm": 3.041447401046753,
"learning_rate": 3.887395330218429e-05,
"loss": 1.9858,
"step": 78
},
{
"epoch": 1.8479532163742691,
"grad_norm": 3.520282745361328,
"learning_rate": 3.759107790948882e-05,
"loss": 1.9349,
"step": 79
},
{
"epoch": 1.871345029239766,
"grad_norm": 3.5151638984680176,
"learning_rate": 3.631685049639586e-05,
"loss": 1.6562,
"step": 80
},
{
"epoch": 1.8947368421052633,
"grad_norm": 4.325654983520508,
"learning_rate": 3.505215909271149e-05,
"loss": 1.9125,
"step": 81
},
{
"epoch": 1.9181286549707601,
"grad_norm": 5.122260570526123,
"learning_rate": 3.379788508244171e-05,
"loss": 1.6561,
"step": 82
},
{
"epoch": 1.9415204678362574,
"grad_norm": 5.443163871765137,
"learning_rate": 3.255490258954167e-05,
"loss": 2.5903,
"step": 83
},
{
"epoch": 1.9649122807017543,
"grad_norm": 2.3221733570098877,
"learning_rate": 3.132407786872442e-05,
"loss": 1.9197,
"step": 84
},
{
"epoch": 1.9883040935672516,
"grad_norm": 4.910060405731201,
"learning_rate": 3.0106268701753965e-05,
"loss": 2.2014,
"step": 85
},
{
"epoch": 2.0116959064327484,
"grad_norm": 3.926137685775757,
"learning_rate": 2.8902323799643116e-05,
"loss": 2.227,
"step": 86
},
{
"epoch": 2.0350877192982457,
"grad_norm": 1.8208245038986206,
"learning_rate": 2.771308221117309e-05,
"loss": 1.8213,
"step": 87
},
{
"epoch": 2.0584795321637426,
"grad_norm": 2.0533692836761475,
"learning_rate": 2.6539372738146695e-05,
"loss": 1.6875,
"step": 88
},
{
"epoch": 2.08187134502924,
"grad_norm": 2.5335185527801514,
"learning_rate": 2.5382013357782893e-05,
"loss": 1.7146,
"step": 89
},
{
"epoch": 2.1052631578947367,
"grad_norm": 2.658461093902588,
"learning_rate": 2.4241810652655196e-05,
"loss": 1.6028,
"step": 90
},
{
"epoch": 2.128654970760234,
"grad_norm": 2.784259796142578,
"learning_rate": 2.3119559248571128e-05,
"loss": 1.5149,
"step": 91
},
{
"epoch": 2.152046783625731,
"grad_norm": 3.2446987628936768,
"learning_rate": 2.2016041260784605e-05,
"loss": 1.2587,
"step": 92
},
{
"epoch": 2.175438596491228,
"grad_norm": 3.726407527923584,
"learning_rate": 2.0932025748927013e-05,
"loss": 1.286,
"step": 93
},
{
"epoch": 2.198830409356725,
"grad_norm": 4.698680400848389,
"learning_rate": 1.9868268181037185e-05,
"loss": 1.2943,
"step": 94
},
{
"epoch": 2.2222222222222223,
"grad_norm": 4.862926483154297,
"learning_rate": 1.8825509907063327e-05,
"loss": 0.9368,
"step": 95
},
{
"epoch": 2.245614035087719,
"grad_norm": 4.17417573928833,
"learning_rate": 1.7804477642204222e-05,
"loss": 1.8199,
"step": 96
},
{
"epoch": 2.2690058479532165,
"grad_norm": 3.123291254043579,
"learning_rate": 1.6805882960449594e-05,
"loss": 2.1358,
"step": 97
},
{
"epoch": 2.2923976608187133,
"grad_norm": 3.0750467777252197,
"learning_rate": 1.5830421798672568e-05,
"loss": 1.6731,
"step": 98
},
{
"epoch": 2.3157894736842106,
"grad_norm": 3.460376262664795,
"learning_rate": 1.4878773971620074e-05,
"loss": 1.7389,
"step": 99
},
{
"epoch": 2.3391812865497075,
"grad_norm": 3.5447030067443848,
"learning_rate": 1.3951602698138771e-05,
"loss": 1.5398,
"step": 100
},
{
"epoch": 2.3391812865497075,
"eval_loss": 2.5794575214385986,
"eval_runtime": 3.2727,
"eval_samples_per_second": 22.0,
"eval_steps_per_second": 5.5,
"step": 100
},
{
"epoch": 2.3625730994152048,
"grad_norm": 3.9561784267425537,
"learning_rate": 1.3049554138967051e-05,
"loss": 1.5536,
"step": 101
},
{
"epoch": 2.3859649122807016,
"grad_norm": 3.936734914779663,
"learning_rate": 1.2173256946415212e-05,
"loss": 1.4171,
"step": 102
},
{
"epoch": 2.409356725146199,
"grad_norm": 4.206720352172852,
"learning_rate": 1.1323321826247346e-05,
"loss": 1.2999,
"step": 103
},
{
"epoch": 2.4327485380116958,
"grad_norm": 5.046553611755371,
"learning_rate": 1.0500341112070605e-05,
"loss": 1.0988,
"step": 104
},
{
"epoch": 2.456140350877193,
"grad_norm": 5.955111026763916,
"learning_rate": 9.704888352528258e-06,
"loss": 0.9185,
"step": 105
},
{
"epoch": 2.47953216374269,
"grad_norm": 3.576009511947632,
"learning_rate": 8.93751791158432e-06,
"loss": 1.9256,
"step": 106
},
{
"epoch": 2.502923976608187,
"grad_norm": 2.3604319095611572,
"learning_rate": 8.198764582178303e-06,
"loss": 1.9458,
"step": 107
},
{
"epoch": 2.526315789473684,
"grad_norm": 2.6111466884613037,
"learning_rate": 7.489143213519301e-06,
"loss": 1.7967,
"step": 108
},
{
"epoch": 2.5497076023391814,
"grad_norm": 3.0367538928985596,
"learning_rate": 6.809148352279182e-06,
"loss": 1.4832,
"step": 109
},
{
"epoch": 2.573099415204678,
"grad_norm": 3.416107416152954,
"learning_rate": 6.159253897935069e-06,
"loss": 1.6191,
"step": 110
},
{
"epoch": 2.5964912280701755,
"grad_norm": 3.648995876312256,
"learning_rate": 5.539912772500944e-06,
"loss": 1.4337,
"step": 111
},
{
"epoch": 2.6198830409356724,
"grad_norm": 3.623403310775757,
"learning_rate": 4.951556604879048e-06,
"loss": 1.1803,
"step": 112
},
{
"epoch": 2.6432748538011697,
"grad_norm": 4.428551197052002,
"learning_rate": 4.394595430050613e-06,
"loss": 1.1911,
"step": 113
},
{
"epoch": 2.6666666666666665,
"grad_norm": 4.538227558135986,
"learning_rate": 3.8694174033158555e-06,
"loss": 1.0355,
"step": 114
},
{
"epoch": 2.690058479532164,
"grad_norm": 5.722489833831787,
"learning_rate": 3.376388529782215e-06,
"loss": 0.9762,
"step": 115
},
{
"epoch": 2.7134502923976607,
"grad_norm": 3.4204437732696533,
"learning_rate": 2.9158524092894214e-06,
"loss": 1.7561,
"step": 116
},
{
"epoch": 2.736842105263158,
"grad_norm": 2.083275079727173,
"learning_rate": 2.488129996949251e-06,
"loss": 1.8624,
"step": 117
},
{
"epoch": 2.760233918128655,
"grad_norm": 2.574385404586792,
"learning_rate": 2.093519379466602e-06,
"loss": 1.7507,
"step": 118
},
{
"epoch": 2.783625730994152,
"grad_norm": 2.773837089538574,
"learning_rate": 1.7322955673980678e-06,
"loss": 1.6317,
"step": 119
},
{
"epoch": 2.807017543859649,
"grad_norm": 3.0657918453216553,
"learning_rate": 1.404710303492618e-06,
"loss": 1.7106,
"step": 120
},
{
"epoch": 2.8304093567251463,
"grad_norm": 3.1810173988342285,
"learning_rate": 1.1109918872479642e-06,
"loss": 1.4181,
"step": 121
},
{
"epoch": 2.853801169590643,
"grad_norm": 3.865628719329834,
"learning_rate": 8.513450158049108e-07,
"loss": 1.4733,
"step": 122
},
{
"epoch": 2.8771929824561404,
"grad_norm": 3.952064275741577,
"learning_rate": 6.259506412906402e-07,
"loss": 1.1925,
"step": 123
},
{
"epoch": 2.9005847953216373,
"grad_norm": 4.404904842376709,
"learning_rate": 4.349658447101612e-07,
"loss": 1.0374,
"step": 124
},
{
"epoch": 2.9239766081871346,
"grad_norm": 5.374385356903076,
"learning_rate": 2.785237264740781e-07,
"loss": 0.8663,
"step": 125
},
{
"epoch": 2.9473684210526314,
"grad_norm": 4.283792018890381,
"learning_rate": 1.567333136387017e-07,
"loss": 1.8765,
"step": 126
},
{
"epoch": 2.9707602339181287,
"grad_norm": 3.530892848968506,
"learning_rate": 6.967948392331835e-08,
"loss": 1.4727,
"step": 127
},
{
"epoch": 2.9941520467836256,
"grad_norm": 6.096131324768066,
"learning_rate": 1.742290655755707e-08,
"loss": 1.1021,
"step": 128
},
{
"epoch": 3.017543859649123,
"grad_norm": 3.1924569606781006,
"learning_rate": 0.0,
"loss": 2.1947,
"step": 129
}
],
"logging_steps": 1,
"max_steps": 129,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.386784338870272e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}