{
"best_metric": 1.3121320009231567,
"best_model_checkpoint": "miner_id_24/checkpoint-100",
"epoch": 1.0052356020942408,
"eval_steps": 50,
"global_step": 120,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008376963350785341,
"grad_norm": 1.4030457735061646,
"learning_rate": 2e-05,
"loss": 2.1507,
"step": 1
},
{
"epoch": 0.008376963350785341,
"eval_loss": 2.1936235427856445,
"eval_runtime": 54.1297,
"eval_samples_per_second": 7.445,
"eval_steps_per_second": 1.866,
"step": 1
},
{
"epoch": 0.016753926701570682,
"grad_norm": 1.4992213249206543,
"learning_rate": 4e-05,
"loss": 2.0929,
"step": 2
},
{
"epoch": 0.025130890052356022,
"grad_norm": 1.784857988357544,
"learning_rate": 6e-05,
"loss": 2.1302,
"step": 3
},
{
"epoch": 0.033507853403141365,
"grad_norm": 1.5829931497573853,
"learning_rate": 8e-05,
"loss": 2.1207,
"step": 4
},
{
"epoch": 0.041884816753926704,
"grad_norm": 1.2313480377197266,
"learning_rate": 0.0001,
"loss": 2.0151,
"step": 5
},
{
"epoch": 0.050261780104712044,
"grad_norm": 1.41518235206604,
"learning_rate": 0.00012,
"loss": 1.9839,
"step": 6
},
{
"epoch": 0.05863874345549738,
"grad_norm": 0.8072548508644104,
"learning_rate": 0.00014,
"loss": 1.9233,
"step": 7
},
{
"epoch": 0.06701570680628273,
"grad_norm": 0.5549747943878174,
"learning_rate": 0.00016,
"loss": 1.8144,
"step": 8
},
{
"epoch": 0.07539267015706806,
"grad_norm": 0.5323136448860168,
"learning_rate": 0.00018,
"loss": 1.7493,
"step": 9
},
{
"epoch": 0.08376963350785341,
"grad_norm": 0.5407094955444336,
"learning_rate": 0.0002,
"loss": 1.7072,
"step": 10
},
{
"epoch": 0.09214659685863874,
"grad_norm": 0.46159911155700684,
"learning_rate": 0.00019995921928281894,
"loss": 1.651,
"step": 11
},
{
"epoch": 0.10052356020942409,
"grad_norm": 0.6404337882995605,
"learning_rate": 0.00019983691039261357,
"loss": 1.6431,
"step": 12
},
{
"epoch": 0.10890052356020942,
"grad_norm": 0.352531373500824,
"learning_rate": 0.00019963317308626914,
"loss": 1.6052,
"step": 13
},
{
"epoch": 0.11727748691099477,
"grad_norm": 0.3619747757911682,
"learning_rate": 0.00019934817353485501,
"loss": 1.5712,
"step": 14
},
{
"epoch": 0.1256544502617801,
"grad_norm": 0.519852876663208,
"learning_rate": 0.0001989821441880933,
"loss": 1.5411,
"step": 15
},
{
"epoch": 0.13403141361256546,
"grad_norm": 0.4429551064968109,
"learning_rate": 0.00019853538358476932,
"loss": 1.5922,
"step": 16
},
{
"epoch": 0.1424083769633508,
"grad_norm": 0.3226163983345032,
"learning_rate": 0.00019800825610923934,
"loss": 1.5483,
"step": 17
},
{
"epoch": 0.15078534031413612,
"grad_norm": 0.3610597550868988,
"learning_rate": 0.00019740119169423337,
"loss": 1.5054,
"step": 18
},
{
"epoch": 0.15916230366492146,
"grad_norm": 0.29118454456329346,
"learning_rate": 0.00019671468547019573,
"loss": 1.5432,
"step": 19
},
{
"epoch": 0.16753926701570682,
"grad_norm": 0.24570101499557495,
"learning_rate": 0.00019594929736144976,
"loss": 1.4688,
"step": 20
},
{
"epoch": 0.17591623036649215,
"grad_norm": 0.28832247853279114,
"learning_rate": 0.00019510565162951537,
"loss": 1.4375,
"step": 21
},
{
"epoch": 0.18429319371727748,
"grad_norm": 0.25904181599617004,
"learning_rate": 0.00019418443636395248,
"loss": 1.534,
"step": 22
},
{
"epoch": 0.19267015706806281,
"grad_norm": 0.2384636104106903,
"learning_rate": 0.00019318640292114524,
"loss": 1.4328,
"step": 23
},
{
"epoch": 0.20104712041884817,
"grad_norm": 0.27154815196990967,
"learning_rate": 0.000192112365311485,
"loss": 1.4292,
"step": 24
},
{
"epoch": 0.2094240837696335,
"grad_norm": 0.2336822897195816,
"learning_rate": 0.00019096319953545185,
"loss": 1.4027,
"step": 25
},
{
"epoch": 0.21780104712041884,
"grad_norm": 0.21763642132282257,
"learning_rate": 0.00018973984286913584,
"loss": 1.4238,
"step": 26
},
{
"epoch": 0.2261780104712042,
"grad_norm": 0.2171494960784912,
"learning_rate": 0.00018844329309978145,
"loss": 1.4108,
"step": 27
},
{
"epoch": 0.23455497382198953,
"grad_norm": 0.20013433694839478,
"learning_rate": 0.00018707460771197774,
"loss": 1.3777,
"step": 28
},
{
"epoch": 0.24293193717277486,
"grad_norm": 0.22450464963912964,
"learning_rate": 0.0001856349030251589,
"loss": 1.396,
"step": 29
},
{
"epoch": 0.2513089005235602,
"grad_norm": 0.23654663562774658,
"learning_rate": 0.00018412535328311814,
"loss": 1.4103,
"step": 30
},
{
"epoch": 0.25968586387434556,
"grad_norm": 0.2156527042388916,
"learning_rate": 0.0001825471896962774,
"loss": 1.4367,
"step": 31
},
{
"epoch": 0.2680628272251309,
"grad_norm": 0.19791820645332336,
"learning_rate": 0.00018090169943749476,
"loss": 1.4173,
"step": 32
},
{
"epoch": 0.2764397905759162,
"grad_norm": 0.2152637392282486,
"learning_rate": 0.00017919022459222752,
"loss": 1.3479,
"step": 33
},
{
"epoch": 0.2848167539267016,
"grad_norm": 0.20826572179794312,
"learning_rate": 0.00017741416106390826,
"loss": 1.3828,
"step": 34
},
{
"epoch": 0.2931937172774869,
"grad_norm": 0.20378273725509644,
"learning_rate": 0.00017557495743542585,
"loss": 1.3657,
"step": 35
},
{
"epoch": 0.30157068062827225,
"grad_norm": 0.2143692672252655,
"learning_rate": 0.0001736741137876405,
"loss": 1.4046,
"step": 36
},
{
"epoch": 0.3099476439790576,
"grad_norm": 0.22452476620674133,
"learning_rate": 0.00017171318047589637,
"loss": 1.3673,
"step": 37
},
{
"epoch": 0.3183246073298429,
"grad_norm": 0.20779763162136078,
"learning_rate": 0.00016969375686552937,
"loss": 1.4057,
"step": 38
},
{
"epoch": 0.3267015706806283,
"grad_norm": 0.2210998684167862,
"learning_rate": 0.00016761749002740193,
"loss": 1.3789,
"step": 39
},
{
"epoch": 0.33507853403141363,
"grad_norm": 0.22668598592281342,
"learning_rate": 0.00016548607339452853,
"loss": 1.3272,
"step": 40
},
{
"epoch": 0.34345549738219894,
"grad_norm": 0.21081912517547607,
"learning_rate": 0.00016330124538088705,
"loss": 1.4033,
"step": 41
},
{
"epoch": 0.3518324607329843,
"grad_norm": 0.20994143187999725,
"learning_rate": 0.00016106478796354382,
"loss": 1.4135,
"step": 42
},
{
"epoch": 0.36020942408376966,
"grad_norm": 0.22178034484386444,
"learning_rate": 0.00015877852522924732,
"loss": 1.3235,
"step": 43
},
{
"epoch": 0.36858638743455496,
"grad_norm": 0.26765337586402893,
"learning_rate": 0.00015644432188667695,
"loss": 1.3852,
"step": 44
},
{
"epoch": 0.3769633507853403,
"grad_norm": 0.31858474016189575,
"learning_rate": 0.00015406408174555976,
"loss": 1.338,
"step": 45
},
{
"epoch": 0.38534031413612563,
"grad_norm": 0.24434570968151093,
"learning_rate": 0.0001516397461638962,
"loss": 1.3631,
"step": 46
},
{
"epoch": 0.393717277486911,
"grad_norm": 0.22655674815177917,
"learning_rate": 0.0001491732924645604,
"loss": 1.3075,
"step": 47
},
{
"epoch": 0.40209424083769635,
"grad_norm": 0.21461047232151031,
"learning_rate": 0.00014666673232256738,
"loss": 1.3294,
"step": 48
},
{
"epoch": 0.41047120418848165,
"grad_norm": 0.2243211269378662,
"learning_rate": 0.00014412211012432212,
"loss": 1.3267,
"step": 49
},
{
"epoch": 0.418848167539267,
"grad_norm": 0.22946247458457947,
"learning_rate": 0.00014154150130018866,
"loss": 1.3904,
"step": 50
},
{
"epoch": 0.418848167539267,
"eval_loss": 1.3593463897705078,
"eval_runtime": 54.6174,
"eval_samples_per_second": 7.379,
"eval_steps_per_second": 1.849,
"step": 50
},
{
"epoch": 0.4272251308900524,
"grad_norm": 0.2250441461801529,
"learning_rate": 0.00013892701063173918,
"loss": 1.3205,
"step": 51
},
{
"epoch": 0.4356020942408377,
"grad_norm": 0.2325247824192047,
"learning_rate": 0.0001362807705350641,
"loss": 1.3751,
"step": 52
},
{
"epoch": 0.44397905759162304,
"grad_norm": 0.23405078053474426,
"learning_rate": 0.00013360493932154302,
"loss": 1.3023,
"step": 53
},
{
"epoch": 0.4523560209424084,
"grad_norm": 0.24694718420505524,
"learning_rate": 0.00013090169943749476,
"loss": 1.329,
"step": 54
},
{
"epoch": 0.4607329842931937,
"grad_norm": 0.23932890594005585,
"learning_rate": 0.00012817325568414297,
"loss": 1.308,
"step": 55
},
{
"epoch": 0.46910994764397906,
"grad_norm": 0.23090919852256775,
"learning_rate": 0.00012542183341934872,
"loss": 1.3658,
"step": 56
},
{
"epoch": 0.4774869109947644,
"grad_norm": 0.23715175688266754,
"learning_rate": 0.00012264967674257646,
"loss": 1.2654,
"step": 57
},
{
"epoch": 0.48586387434554973,
"grad_norm": 0.24813269078731537,
"learning_rate": 0.00011985904666457455,
"loss": 1.3035,
"step": 58
},
{
"epoch": 0.4942408376963351,
"grad_norm": 0.2591025233268738,
"learning_rate": 0.0001170522192632624,
"loss": 1.3445,
"step": 59
},
{
"epoch": 0.5026178010471204,
"grad_norm": 0.26143878698349,
"learning_rate": 0.00011423148382732853,
"loss": 1.3675,
"step": 60
},
{
"epoch": 0.5109947643979058,
"grad_norm": 0.2329186201095581,
"learning_rate": 0.00011139914098905406,
"loss": 1.3384,
"step": 61
},
{
"epoch": 0.5193717277486911,
"grad_norm": 0.23178386688232422,
"learning_rate": 0.00010855750084788398,
"loss": 1.3093,
"step": 62
},
{
"epoch": 0.5277486910994764,
"grad_norm": 0.23026645183563232,
"learning_rate": 0.00010570888108627681,
"loss": 1.3064,
"step": 63
},
{
"epoch": 0.5361256544502618,
"grad_norm": 0.24100545048713684,
"learning_rate": 0.00010285560507936961,
"loss": 1.3258,
"step": 64
},
{
"epoch": 0.5445026178010471,
"grad_norm": 0.24575698375701904,
"learning_rate": 0.0001,
"loss": 1.3398,
"step": 65
},
{
"epoch": 0.5528795811518324,
"grad_norm": 0.2778918445110321,
"learning_rate": 9.71443949206304e-05,
"loss": 1.3778,
"step": 66
},
{
"epoch": 0.5612565445026177,
"grad_norm": 0.26030927896499634,
"learning_rate": 9.42911189137232e-05,
"loss": 1.3101,
"step": 67
},
{
"epoch": 0.5696335078534032,
"grad_norm": 0.2404455989599228,
"learning_rate": 9.144249915211605e-05,
"loss": 1.2912,
"step": 68
},
{
"epoch": 0.5780104712041885,
"grad_norm": 0.24763959646224976,
"learning_rate": 8.860085901094595e-05,
"loss": 1.3059,
"step": 69
},
{
"epoch": 0.5863874345549738,
"grad_norm": 0.24902191758155823,
"learning_rate": 8.57685161726715e-05,
"loss": 1.2653,
"step": 70
},
{
"epoch": 0.5947643979057592,
"grad_norm": 0.2563740015029907,
"learning_rate": 8.294778073673762e-05,
"loss": 1.3613,
"step": 71
},
{
"epoch": 0.6031413612565445,
"grad_norm": 0.2514787018299103,
"learning_rate": 8.014095333542548e-05,
"loss": 1.3108,
"step": 72
},
{
"epoch": 0.6115183246073298,
"grad_norm": 0.2779984772205353,
"learning_rate": 7.735032325742355e-05,
"loss": 1.2887,
"step": 73
},
{
"epoch": 0.6198952879581152,
"grad_norm": 0.24789296090602875,
"learning_rate": 7.457816658065134e-05,
"loss": 1.3755,
"step": 74
},
{
"epoch": 0.6282722513089005,
"grad_norm": 0.24377316236495972,
"learning_rate": 7.182674431585704e-05,
"loss": 1.2984,
"step": 75
},
{
"epoch": 0.6366492146596858,
"grad_norm": 0.24581335484981537,
"learning_rate": 6.909830056250527e-05,
"loss": 1.3265,
"step": 76
},
{
"epoch": 0.6450261780104712,
"grad_norm": 0.22706155478954315,
"learning_rate": 6.639506067845697e-05,
"loss": 1.2957,
"step": 77
},
{
"epoch": 0.6534031413612565,
"grad_norm": 0.2653581500053406,
"learning_rate": 6.371922946493591e-05,
"loss": 1.3115,
"step": 78
},
{
"epoch": 0.6617801047120419,
"grad_norm": 0.26213908195495605,
"learning_rate": 6.107298936826086e-05,
"loss": 1.2911,
"step": 79
},
{
"epoch": 0.6701570680628273,
"grad_norm": 0.2331765592098236,
"learning_rate": 5.845849869981137e-05,
"loss": 1.2769,
"step": 80
},
{
"epoch": 0.6785340314136126,
"grad_norm": 0.24162189662456512,
"learning_rate": 5.5877889875677845e-05,
"loss": 1.2802,
"step": 81
},
{
"epoch": 0.6869109947643979,
"grad_norm": 0.26189252734184265,
"learning_rate": 5.333326767743263e-05,
"loss": 1.3237,
"step": 82
},
{
"epoch": 0.6952879581151833,
"grad_norm": 0.22555485367774963,
"learning_rate": 5.082670753543961e-05,
"loss": 1.2745,
"step": 83
},
{
"epoch": 0.7036649214659686,
"grad_norm": 0.254704087972641,
"learning_rate": 4.836025383610382e-05,
"loss": 1.354,
"step": 84
},
{
"epoch": 0.7120418848167539,
"grad_norm": 0.28260111808776855,
"learning_rate": 4.593591825444028e-05,
"loss": 1.2935,
"step": 85
},
{
"epoch": 0.7204188481675393,
"grad_norm": 0.25090911984443665,
"learning_rate": 4.355567811332311e-05,
"loss": 1.2761,
"step": 86
},
{
"epoch": 0.7287958115183246,
"grad_norm": 0.26403096318244934,
"learning_rate": 4.12214747707527e-05,
"loss": 1.2903,
"step": 87
},
{
"epoch": 0.7371727748691099,
"grad_norm": 0.2612413763999939,
"learning_rate": 3.893521203645618e-05,
"loss": 1.356,
"step": 88
},
{
"epoch": 0.7455497382198953,
"grad_norm": 0.24037125706672668,
"learning_rate": 3.669875461911297e-05,
"loss": 1.2865,
"step": 89
},
{
"epoch": 0.7539267015706806,
"grad_norm": 0.252249538898468,
"learning_rate": 3.45139266054715e-05,
"loss": 1.2468,
"step": 90
},
{
"epoch": 0.762303664921466,
"grad_norm": 0.25495651364326477,
"learning_rate": 3.238250997259808e-05,
"loss": 1.3012,
"step": 91
},
{
"epoch": 0.7706806282722513,
"grad_norm": 0.2322113960981369,
"learning_rate": 3.030624313447067e-05,
"loss": 1.2818,
"step": 92
},
{
"epoch": 0.7790575916230367,
"grad_norm": 0.2572304606437683,
"learning_rate": 2.828681952410366e-05,
"loss": 1.2221,
"step": 93
},
{
"epoch": 0.787434554973822,
"grad_norm": 0.24987637996673584,
"learning_rate": 2.6325886212359498e-05,
"loss": 1.3307,
"step": 94
},
{
"epoch": 0.7958115183246073,
"grad_norm": 0.25658029317855835,
"learning_rate": 2.4425042564574184e-05,
"loss": 1.299,
"step": 95
},
{
"epoch": 0.8041884816753927,
"grad_norm": 0.23883827030658722,
"learning_rate": 2.2585838936091754e-05,
"loss": 1.3355,
"step": 96
},
{
"epoch": 0.812565445026178,
"grad_norm": 0.26572129130363464,
"learning_rate": 2.0809775407772503e-05,
"loss": 1.2445,
"step": 97
},
{
"epoch": 0.8209424083769633,
"grad_norm": 0.2597862780094147,
"learning_rate": 1.9098300562505266e-05,
"loss": 1.2866,
"step": 98
},
{
"epoch": 0.8293193717277487,
"grad_norm": 0.24302197992801666,
"learning_rate": 1.74528103037226e-05,
"loss": 1.2775,
"step": 99
},
{
"epoch": 0.837696335078534,
"grad_norm": 0.2617965638637543,
"learning_rate": 1.587464671688187e-05,
"loss": 1.3117,
"step": 100
},
{
"epoch": 0.837696335078534,
"eval_loss": 1.3121320009231567,
"eval_runtime": 54.6331,
"eval_samples_per_second": 7.376,
"eval_steps_per_second": 1.849,
"step": 100
},
{
"epoch": 0.8460732984293193,
"grad_norm": 0.24377289414405823,
"learning_rate": 1.4365096974841108e-05,
"loss": 1.2426,
"step": 101
},
{
"epoch": 0.8544502617801047,
"grad_norm": 0.27847498655319214,
"learning_rate": 1.2925392288022298e-05,
"loss": 1.2593,
"step": 102
},
{
"epoch": 0.86282722513089,
"grad_norm": 0.2640259563922882,
"learning_rate": 1.1556706900218572e-05,
"loss": 1.2703,
"step": 103
},
{
"epoch": 0.8712041884816754,
"grad_norm": 0.2538398504257202,
"learning_rate": 1.026015713086418e-05,
"loss": 1.2544,
"step": 104
},
{
"epoch": 0.8795811518324608,
"grad_norm": 0.244331493973732,
"learning_rate": 9.036800464548157e-06,
"loss": 1.3047,
"step": 105
},
{
"epoch": 0.8879581151832461,
"grad_norm": 0.26120704412460327,
"learning_rate": 7.887634688515e-06,
"loss": 1.2688,
"step": 106
},
{
"epoch": 0.8963350785340314,
"grad_norm": 0.2646150290966034,
"learning_rate": 6.813597078854772e-06,
"loss": 1.3536,
"step": 107
},
{
"epoch": 0.9047120418848168,
"grad_norm": 0.2558758854866028,
"learning_rate": 5.8155636360475385e-06,
"loss": 1.2918,
"step": 108
},
{
"epoch": 0.9130890052356021,
"grad_norm": 0.25811469554901123,
"learning_rate": 4.8943483704846475e-06,
"loss": 1.2869,
"step": 109
},
{
"epoch": 0.9214659685863874,
"grad_norm": 0.25502899289131165,
"learning_rate": 4.050702638550275e-06,
"loss": 1.2949,
"step": 110
},
{
"epoch": 0.9298429319371728,
"grad_norm": 0.25524598360061646,
"learning_rate": 3.2853145298042953e-06,
"loss": 1.27,
"step": 111
},
{
"epoch": 0.9382198952879581,
"grad_norm": 0.2520923614501953,
"learning_rate": 2.5988083057666533e-06,
"loss": 1.3135,
"step": 112
},
{
"epoch": 0.9465968586387434,
"grad_norm": 0.2525266110897064,
"learning_rate": 1.9917438907606556e-06,
"loss": 1.2766,
"step": 113
},
{
"epoch": 0.9549738219895288,
"grad_norm": 0.25608813762664795,
"learning_rate": 1.4646164152307018e-06,
"loss": 1.3429,
"step": 114
},
{
"epoch": 0.9633507853403142,
"grad_norm": 0.26775017380714417,
"learning_rate": 1.0178558119067315e-06,
"loss": 1.3198,
"step": 115
},
{
"epoch": 0.9717277486910995,
"grad_norm": 0.25307947397232056,
"learning_rate": 6.518264651449779e-07,
"loss": 1.2831,
"step": 116
},
{
"epoch": 0.9801047120418848,
"grad_norm": 0.26626452803611755,
"learning_rate": 3.6682691373086665e-07,
"loss": 1.2701,
"step": 117
},
{
"epoch": 0.9884816753926702,
"grad_norm": 0.22642602026462555,
"learning_rate": 1.630896073864352e-07,
"loss": 1.3243,
"step": 118
},
{
"epoch": 0.9968586387434555,
"grad_norm": 0.28676363825798035,
"learning_rate": 4.078071718107701e-08,
"loss": 1.3269,
"step": 119
},
{
"epoch": 1.0052356020942408,
"grad_norm": 0.5024040937423706,
"learning_rate": 0.0,
"loss": 2.1063,
"step": 120
}
],
"logging_steps": 1,
"max_steps": 120,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.327790086012273e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}