{
"best_metric": 10.32762336730957,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.02463888632233823,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00012319443161169116,
"grad_norm": 0.37034285068511963,
"learning_rate": 1e-05,
"loss": 41.5079,
"step": 1
},
{
"epoch": 0.00012319443161169116,
"eval_loss": 10.379491806030273,
"eval_runtime": 28.7674,
"eval_samples_per_second": 475.225,
"eval_steps_per_second": 118.815,
"step": 1
},
{
"epoch": 0.0002463888632233823,
"grad_norm": 0.41543495655059814,
"learning_rate": 2e-05,
"loss": 41.5068,
"step": 2
},
{
"epoch": 0.00036958329483507344,
"grad_norm": 0.3897373080253601,
"learning_rate": 3e-05,
"loss": 41.5251,
"step": 3
},
{
"epoch": 0.0004927777264467646,
"grad_norm": 0.4008975923061371,
"learning_rate": 4e-05,
"loss": 41.5305,
"step": 4
},
{
"epoch": 0.0006159721580584558,
"grad_norm": 0.39021116495132446,
"learning_rate": 5e-05,
"loss": 41.4983,
"step": 5
},
{
"epoch": 0.0007391665896701469,
"grad_norm": 0.4045765995979309,
"learning_rate": 6e-05,
"loss": 41.5147,
"step": 6
},
{
"epoch": 0.000862361021281838,
"grad_norm": 0.36582186818122864,
"learning_rate": 7e-05,
"loss": 41.538,
"step": 7
},
{
"epoch": 0.0009855554528935293,
"grad_norm": 0.38223153352737427,
"learning_rate": 8e-05,
"loss": 41.4999,
"step": 8
},
{
"epoch": 0.0011087498845052204,
"grad_norm": 0.3638783395290375,
"learning_rate": 9e-05,
"loss": 41.5122,
"step": 9
},
{
"epoch": 0.0012319443161169115,
"grad_norm": 0.3758089542388916,
"learning_rate": 0.0001,
"loss": 41.5182,
"step": 10
},
{
"epoch": 0.0013551387477286026,
"grad_norm": 0.4046088755130768,
"learning_rate": 9.999316524962345e-05,
"loss": 41.5346,
"step": 11
},
{
"epoch": 0.0014783331793402938,
"grad_norm": 0.4480150640010834,
"learning_rate": 9.997266286704631e-05,
"loss": 41.4884,
"step": 12
},
{
"epoch": 0.001601527610951985,
"grad_norm": 0.38534626364707947,
"learning_rate": 9.993849845741524e-05,
"loss": 41.4963,
"step": 13
},
{
"epoch": 0.001724722042563676,
"grad_norm": 0.46365806460380554,
"learning_rate": 9.989068136093873e-05,
"loss": 41.5039,
"step": 14
},
{
"epoch": 0.0018479164741753672,
"grad_norm": 0.49963846802711487,
"learning_rate": 9.98292246503335e-05,
"loss": 41.5156,
"step": 15
},
{
"epoch": 0.0019711109057870585,
"grad_norm": 0.49704065918922424,
"learning_rate": 9.975414512725057e-05,
"loss": 41.5083,
"step": 16
},
{
"epoch": 0.0020943053373987494,
"grad_norm": 0.4433138370513916,
"learning_rate": 9.966546331768191e-05,
"loss": 41.4786,
"step": 17
},
{
"epoch": 0.0022174997690104408,
"grad_norm": 0.5082147717475891,
"learning_rate": 9.956320346634876e-05,
"loss": 41.4805,
"step": 18
},
{
"epoch": 0.0023406942006221317,
"grad_norm": 0.5090774297714233,
"learning_rate": 9.944739353007344e-05,
"loss": 41.5024,
"step": 19
},
{
"epoch": 0.002463888632233823,
"grad_norm": 0.5320281386375427,
"learning_rate": 9.931806517013612e-05,
"loss": 41.4618,
"step": 20
},
{
"epoch": 0.0025870830638455144,
"grad_norm": 0.6355424523353577,
"learning_rate": 9.917525374361912e-05,
"loss": 41.4794,
"step": 21
},
{
"epoch": 0.0027102774954572053,
"grad_norm": 0.6119551062583923,
"learning_rate": 9.901899829374047e-05,
"loss": 41.4549,
"step": 22
},
{
"epoch": 0.0028334719270688966,
"grad_norm": 0.6141555309295654,
"learning_rate": 9.884934153917997e-05,
"loss": 41.4591,
"step": 23
},
{
"epoch": 0.0029566663586805876,
"grad_norm": 0.7022342085838318,
"learning_rate": 9.86663298624003e-05,
"loss": 41.4826,
"step": 24
},
{
"epoch": 0.003079860790292279,
"grad_norm": 0.7128514647483826,
"learning_rate": 9.847001329696653e-05,
"loss": 41.486,
"step": 25
},
{
"epoch": 0.00320305522190397,
"grad_norm": 0.7015478610992432,
"learning_rate": 9.826044551386744e-05,
"loss": 41.4673,
"step": 26
},
{
"epoch": 0.003326249653515661,
"grad_norm": 0.8382724523544312,
"learning_rate": 9.803768380684242e-05,
"loss": 41.4349,
"step": 27
},
{
"epoch": 0.003449444085127352,
"grad_norm": 0.8730006814002991,
"learning_rate": 9.780178907671789e-05,
"loss": 41.4475,
"step": 28
},
{
"epoch": 0.0035726385167390434,
"grad_norm": 0.9358540177345276,
"learning_rate": 9.755282581475769e-05,
"loss": 41.4493,
"step": 29
},
{
"epoch": 0.0036958329483507343,
"grad_norm": 0.892292320728302,
"learning_rate": 9.729086208503174e-05,
"loss": 41.4491,
"step": 30
},
{
"epoch": 0.0038190273799624257,
"grad_norm": 0.9767299890518188,
"learning_rate": 9.701596950580806e-05,
"loss": 41.4422,
"step": 31
},
{
"epoch": 0.003942221811574117,
"grad_norm": 0.9271118640899658,
"learning_rate": 9.672822322997305e-05,
"loss": 41.406,
"step": 32
},
{
"epoch": 0.004065416243185808,
"grad_norm": 1.1558642387390137,
"learning_rate": 9.642770192448536e-05,
"loss": 41.424,
"step": 33
},
{
"epoch": 0.004188610674797499,
"grad_norm": 1.2022905349731445,
"learning_rate": 9.611448774886924e-05,
"loss": 41.4216,
"step": 34
},
{
"epoch": 0.004311805106409191,
"grad_norm": 0.9297153949737549,
"learning_rate": 9.578866633275288e-05,
"loss": 41.4242,
"step": 35
},
{
"epoch": 0.0044349995380208816,
"grad_norm": 1.0654194355010986,
"learning_rate": 9.545032675245813e-05,
"loss": 41.4281,
"step": 36
},
{
"epoch": 0.0045581939696325725,
"grad_norm": 1.1089550256729126,
"learning_rate": 9.509956150664796e-05,
"loss": 41.3838,
"step": 37
},
{
"epoch": 0.004681388401244263,
"grad_norm": 1.1491121053695679,
"learning_rate": 9.473646649103818e-05,
"loss": 41.3703,
"step": 38
},
{
"epoch": 0.004804582832855955,
"grad_norm": 0.9658917784690857,
"learning_rate": 9.43611409721806e-05,
"loss": 41.3815,
"step": 39
},
{
"epoch": 0.004927777264467646,
"grad_norm": 0.9329264760017395,
"learning_rate": 9.397368756032445e-05,
"loss": 41.3701,
"step": 40
},
{
"epoch": 0.005050971696079337,
"grad_norm": 0.955482542514801,
"learning_rate": 9.357421218136386e-05,
"loss": 41.3742,
"step": 41
},
{
"epoch": 0.005174166127691029,
"grad_norm": 0.886280357837677,
"learning_rate": 9.316282404787871e-05,
"loss": 41.3932,
"step": 42
},
{
"epoch": 0.00529736055930272,
"grad_norm": 0.7329453825950623,
"learning_rate": 9.273963562927695e-05,
"loss": 41.3587,
"step": 43
},
{
"epoch": 0.005420554990914411,
"grad_norm": 0.776685893535614,
"learning_rate": 9.230476262104677e-05,
"loss": 41.3672,
"step": 44
},
{
"epoch": 0.0055437494225261015,
"grad_norm": 0.7093920707702637,
"learning_rate": 9.185832391312644e-05,
"loss": 41.3434,
"step": 45
},
{
"epoch": 0.005666943854137793,
"grad_norm": 0.6888406276702881,
"learning_rate": 9.140044155740101e-05,
"loss": 41.3421,
"step": 46
},
{
"epoch": 0.005790138285749484,
"grad_norm": 0.6905254125595093,
"learning_rate": 9.093124073433463e-05,
"loss": 41.3444,
"step": 47
},
{
"epoch": 0.005913332717361175,
"grad_norm": 0.6538366675376892,
"learning_rate": 9.045084971874738e-05,
"loss": 41.3437,
"step": 48
},
{
"epoch": 0.006036527148972866,
"grad_norm": 0.6010540723800659,
"learning_rate": 8.995939984474624e-05,
"loss": 41.3592,
"step": 49
},
{
"epoch": 0.006159721580584558,
"grad_norm": 0.7231239080429077,
"learning_rate": 8.945702546981969e-05,
"loss": 41.3236,
"step": 50
},
{
"epoch": 0.006159721580584558,
"eval_loss": 10.336989402770996,
"eval_runtime": 28.4939,
"eval_samples_per_second": 479.787,
"eval_steps_per_second": 119.956,
"step": 50
},
{
"epoch": 0.006282916012196249,
"grad_norm": 0.5357275605201721,
"learning_rate": 8.894386393810563e-05,
"loss": 41.3429,
"step": 51
},
{
"epoch": 0.00640611044380794,
"grad_norm": 0.45036154985427856,
"learning_rate": 8.842005554284296e-05,
"loss": 41.3454,
"step": 52
},
{
"epoch": 0.006529304875419631,
"grad_norm": 0.4848557710647583,
"learning_rate": 8.788574348801675e-05,
"loss": 41.3536,
"step": 53
},
{
"epoch": 0.006652499307031322,
"grad_norm": 0.5253639221191406,
"learning_rate": 8.73410738492077e-05,
"loss": 41.3685,
"step": 54
},
{
"epoch": 0.006775693738643013,
"grad_norm": 0.4596695899963379,
"learning_rate": 8.678619553365659e-05,
"loss": 41.357,
"step": 55
},
{
"epoch": 0.006898888170254704,
"grad_norm": 0.5238817930221558,
"learning_rate": 8.622126023955446e-05,
"loss": 41.3657,
"step": 56
},
{
"epoch": 0.007022082601866396,
"grad_norm": 0.4427866041660309,
"learning_rate": 8.564642241456986e-05,
"loss": 41.3481,
"step": 57
},
{
"epoch": 0.007145277033478087,
"grad_norm": 0.3829557001590729,
"learning_rate": 8.506183921362443e-05,
"loss": 41.3555,
"step": 58
},
{
"epoch": 0.007268471465089778,
"grad_norm": 0.3675144910812378,
"learning_rate": 8.44676704559283e-05,
"loss": 41.3587,
"step": 59
},
{
"epoch": 0.007391665896701469,
"grad_norm": 0.4128322899341583,
"learning_rate": 8.386407858128706e-05,
"loss": 41.3466,
"step": 60
},
{
"epoch": 0.0075148603283131605,
"grad_norm": 0.47052329778671265,
"learning_rate": 8.32512286056924e-05,
"loss": 41.3435,
"step": 61
},
{
"epoch": 0.007638054759924851,
"grad_norm": 0.4124845862388611,
"learning_rate": 8.262928807620843e-05,
"loss": 41.3465,
"step": 62
},
{
"epoch": 0.007761249191536542,
"grad_norm": 0.3967921733856201,
"learning_rate": 8.199842702516583e-05,
"loss": 41.3349,
"step": 63
},
{
"epoch": 0.007884443623148234,
"grad_norm": 0.44313010573387146,
"learning_rate": 8.135881792367686e-05,
"loss": 41.324,
"step": 64
},
{
"epoch": 0.008007638054759925,
"grad_norm": 0.32965555787086487,
"learning_rate": 8.07106356344834e-05,
"loss": 41.3273,
"step": 65
},
{
"epoch": 0.008130832486371616,
"grad_norm": 0.3851642310619354,
"learning_rate": 8.005405736415126e-05,
"loss": 41.3315,
"step": 66
},
{
"epoch": 0.008254026917983307,
"grad_norm": 0.43205058574676514,
"learning_rate": 7.938926261462366e-05,
"loss": 41.3255,
"step": 67
},
{
"epoch": 0.008377221349594998,
"grad_norm": 0.39321786165237427,
"learning_rate": 7.871643313414718e-05,
"loss": 41.3477,
"step": 68
},
{
"epoch": 0.008500415781206689,
"grad_norm": 0.441262423992157,
"learning_rate": 7.803575286758364e-05,
"loss": 41.3206,
"step": 69
},
{
"epoch": 0.008623610212818381,
"grad_norm": 0.34648528695106506,
"learning_rate": 7.734740790612136e-05,
"loss": 41.3267,
"step": 70
},
{
"epoch": 0.008746804644430072,
"grad_norm": 0.3708791732788086,
"learning_rate": 7.66515864363997e-05,
"loss": 41.3234,
"step": 71
},
{
"epoch": 0.008869999076041763,
"grad_norm": 0.3931605815887451,
"learning_rate": 7.594847868906076e-05,
"loss": 41.3447,
"step": 72
},
{
"epoch": 0.008993193507653454,
"grad_norm": 0.3708871901035309,
"learning_rate": 7.52382768867422e-05,
"loss": 41.3465,
"step": 73
},
{
"epoch": 0.009116387939265145,
"grad_norm": 0.34970754384994507,
"learning_rate": 7.452117519152542e-05,
"loss": 41.3311,
"step": 74
},
{
"epoch": 0.009239582370876836,
"grad_norm": 0.4211370050907135,
"learning_rate": 7.379736965185368e-05,
"loss": 41.3399,
"step": 75
},
{
"epoch": 0.009362776802488527,
"grad_norm": 0.36415454745292664,
"learning_rate": 7.30670581489344e-05,
"loss": 41.3429,
"step": 76
},
{
"epoch": 0.00948597123410022,
"grad_norm": 0.36701810359954834,
"learning_rate": 7.233044034264034e-05,
"loss": 41.3501,
"step": 77
},
{
"epoch": 0.00960916566571191,
"grad_norm": 0.45310914516448975,
"learning_rate": 7.158771761692464e-05,
"loss": 41.3187,
"step": 78
},
{
"epoch": 0.009732360097323601,
"grad_norm": 0.3968198597431183,
"learning_rate": 7.083909302476453e-05,
"loss": 41.3347,
"step": 79
},
{
"epoch": 0.009855554528935292,
"grad_norm": 0.34118831157684326,
"learning_rate": 7.008477123264848e-05,
"loss": 41.3228,
"step": 80
},
{
"epoch": 0.009978748960546983,
"grad_norm": 0.32499533891677856,
"learning_rate": 6.932495846462261e-05,
"loss": 41.3377,
"step": 81
},
{
"epoch": 0.010101943392158674,
"grad_norm": 0.39074838161468506,
"learning_rate": 6.855986244591104e-05,
"loss": 41.3265,
"step": 82
},
{
"epoch": 0.010225137823770365,
"grad_norm": 0.37348777055740356,
"learning_rate": 6.778969234612584e-05,
"loss": 41.3244,
"step": 83
},
{
"epoch": 0.010348332255382058,
"grad_norm": 0.3705248534679413,
"learning_rate": 6.701465872208216e-05,
"loss": 41.3355,
"step": 84
},
{
"epoch": 0.010471526686993748,
"grad_norm": 0.32013359665870667,
"learning_rate": 6.623497346023418e-05,
"loss": 41.3354,
"step": 85
},
{
"epoch": 0.01059472111860544,
"grad_norm": 0.3610351085662842,
"learning_rate": 6.545084971874738e-05,
"loss": 41.3316,
"step": 86
},
{
"epoch": 0.01071791555021713,
"grad_norm": 0.3613660931587219,
"learning_rate": 6.466250186922325e-05,
"loss": 41.3088,
"step": 87
},
{
"epoch": 0.010841109981828821,
"grad_norm": 0.48871198296546936,
"learning_rate": 6.387014543809223e-05,
"loss": 41.3168,
"step": 88
},
{
"epoch": 0.010964304413440512,
"grad_norm": 0.3728695213794708,
"learning_rate": 6.307399704769099e-05,
"loss": 41.2947,
"step": 89
},
{
"epoch": 0.011087498845052203,
"grad_norm": 0.4032233953475952,
"learning_rate": 6.227427435703997e-05,
"loss": 41.3213,
"step": 90
},
{
"epoch": 0.011210693276663894,
"grad_norm": 0.374328076839447,
"learning_rate": 6.147119600233758e-05,
"loss": 41.3285,
"step": 91
},
{
"epoch": 0.011333887708275587,
"grad_norm": 0.3907609283924103,
"learning_rate": 6.066498153718735e-05,
"loss": 41.2982,
"step": 92
},
{
"epoch": 0.011457082139887277,
"grad_norm": 0.4053857922554016,
"learning_rate": 5.985585137257401e-05,
"loss": 41.3239,
"step": 93
},
{
"epoch": 0.011580276571498968,
"grad_norm": 0.4666404724121094,
"learning_rate": 5.90440267166055e-05,
"loss": 41.3147,
"step": 94
},
{
"epoch": 0.01170347100311066,
"grad_norm": 0.4556801915168762,
"learning_rate": 5.8229729514036705e-05,
"loss": 41.288,
"step": 95
},
{
"epoch": 0.01182666543472235,
"grad_norm": 0.40397128462791443,
"learning_rate": 5.74131823855921e-05,
"loss": 41.3193,
"step": 96
},
{
"epoch": 0.011949859866334041,
"grad_norm": 0.4455827474594116,
"learning_rate": 5.6594608567103456e-05,
"loss": 41.2847,
"step": 97
},
{
"epoch": 0.012073054297945732,
"grad_norm": 0.4562518298625946,
"learning_rate": 5.577423184847932e-05,
"loss": 41.3214,
"step": 98
},
{
"epoch": 0.012196248729557425,
"grad_norm": 0.4034453332424164,
"learning_rate": 5.495227651252315e-05,
"loss": 41.2924,
"step": 99
},
{
"epoch": 0.012319443161169116,
"grad_norm": 0.5995578765869141,
"learning_rate": 5.4128967273616625e-05,
"loss": 41.2806,
"step": 100
},
{
"epoch": 0.012319443161169116,
"eval_loss": 10.330382347106934,
"eval_runtime": 28.739,
"eval_samples_per_second": 475.695,
"eval_steps_per_second": 118.932,
"step": 100
},
{
"epoch": 0.012442637592780807,
"grad_norm": 0.45945844054222107,
"learning_rate": 5.330452921628497e-05,
"loss": 41.3285,
"step": 101
},
{
"epoch": 0.012565832024392497,
"grad_norm": 0.3176629841327667,
"learning_rate": 5.247918773366112e-05,
"loss": 41.3579,
"step": 102
},
{
"epoch": 0.012689026456004188,
"grad_norm": 0.3888489305973053,
"learning_rate": 5.165316846586541e-05,
"loss": 41.3267,
"step": 103
},
{
"epoch": 0.01281222088761588,
"grad_norm": 0.3837381601333618,
"learning_rate": 5.0826697238317935e-05,
"loss": 41.3467,
"step": 104
},
{
"epoch": 0.01293541531922757,
"grad_norm": 0.3548348546028137,
"learning_rate": 5e-05,
"loss": 41.3039,
"step": 105
},
{
"epoch": 0.013058609750839263,
"grad_norm": 0.37431982159614563,
"learning_rate": 4.917330276168208e-05,
"loss": 41.3269,
"step": 106
},
{
"epoch": 0.013181804182450954,
"grad_norm": 0.4022105932235718,
"learning_rate": 4.834683153413459e-05,
"loss": 41.3233,
"step": 107
},
{
"epoch": 0.013304998614062645,
"grad_norm": 0.3207654654979706,
"learning_rate": 4.7520812266338885e-05,
"loss": 41.3501,
"step": 108
},
{
"epoch": 0.013428193045674336,
"grad_norm": 0.3817063868045807,
"learning_rate": 4.669547078371504e-05,
"loss": 41.3347,
"step": 109
},
{
"epoch": 0.013551387477286026,
"grad_norm": 0.3260782063007355,
"learning_rate": 4.5871032726383386e-05,
"loss": 41.3431,
"step": 110
},
{
"epoch": 0.013674581908897717,
"grad_norm": 0.39310598373413086,
"learning_rate": 4.504772348747687e-05,
"loss": 41.314,
"step": 111
},
{
"epoch": 0.013797776340509408,
"grad_norm": 0.3685118556022644,
"learning_rate": 4.4225768151520694e-05,
"loss": 41.3077,
"step": 112
},
{
"epoch": 0.013920970772121101,
"grad_norm": 0.3489997386932373,
"learning_rate": 4.3405391432896555e-05,
"loss": 41.3431,
"step": 113
},
{
"epoch": 0.014044165203732792,
"grad_norm": 0.3275761008262634,
"learning_rate": 4.2586817614407895e-05,
"loss": 41.3338,
"step": 114
},
{
"epoch": 0.014167359635344483,
"grad_norm": 0.37457457184791565,
"learning_rate": 4.17702704859633e-05,
"loss": 41.3491,
"step": 115
},
{
"epoch": 0.014290554066956174,
"grad_norm": 0.35564523935317993,
"learning_rate": 4.095597328339452e-05,
"loss": 41.3282,
"step": 116
},
{
"epoch": 0.014413748498567865,
"grad_norm": 0.2800564169883728,
"learning_rate": 4.0144148627425993e-05,
"loss": 41.323,
"step": 117
},
{
"epoch": 0.014536942930179556,
"grad_norm": 0.37886232137680054,
"learning_rate": 3.933501846281267e-05,
"loss": 41.3263,
"step": 118
},
{
"epoch": 0.014660137361791246,
"grad_norm": 0.34558311104774475,
"learning_rate": 3.852880399766243e-05,
"loss": 41.3342,
"step": 119
},
{
"epoch": 0.014783331793402937,
"grad_norm": 0.3198447525501251,
"learning_rate": 3.772572564296005e-05,
"loss": 41.3284,
"step": 120
},
{
"epoch": 0.01490652622501463,
"grad_norm": 0.29619088768959045,
"learning_rate": 3.6926002952309016e-05,
"loss": 41.303,
"step": 121
},
{
"epoch": 0.015029720656626321,
"grad_norm": 0.37361475825309753,
"learning_rate": 3.612985456190778e-05,
"loss": 41.314,
"step": 122
},
{
"epoch": 0.015152915088238012,
"grad_norm": 0.44932469725608826,
"learning_rate": 3.533749813077677e-05,
"loss": 41.3175,
"step": 123
},
{
"epoch": 0.015276109519849703,
"grad_norm": 0.37011954188346863,
"learning_rate": 3.4549150281252636e-05,
"loss": 41.3393,
"step": 124
},
{
"epoch": 0.015399303951461394,
"grad_norm": 0.3884902894496918,
"learning_rate": 3.3765026539765834e-05,
"loss": 41.3252,
"step": 125
},
{
"epoch": 0.015522498383073085,
"grad_norm": 0.3051610291004181,
"learning_rate": 3.298534127791785e-05,
"loss": 41.2991,
"step": 126
},
{
"epoch": 0.015645692814684777,
"grad_norm": 0.3233656585216522,
"learning_rate": 3.221030765387417e-05,
"loss": 41.3326,
"step": 127
},
{
"epoch": 0.015768887246296468,
"grad_norm": 0.4488888680934906,
"learning_rate": 3.144013755408895e-05,
"loss": 41.2823,
"step": 128
},
{
"epoch": 0.01589208167790816,
"grad_norm": 0.3062331974506378,
"learning_rate": 3.0675041535377405e-05,
"loss": 41.3405,
"step": 129
},
{
"epoch": 0.01601527610951985,
"grad_norm": 0.3733638525009155,
"learning_rate": 2.991522876735154e-05,
"loss": 41.3219,
"step": 130
},
{
"epoch": 0.01613847054113154,
"grad_norm": 0.3980589807033539,
"learning_rate": 2.916090697523549e-05,
"loss": 41.3033,
"step": 131
},
{
"epoch": 0.016261664972743232,
"grad_norm": 0.41536206007003784,
"learning_rate": 2.8412282383075363e-05,
"loss": 41.3283,
"step": 132
},
{
"epoch": 0.016384859404354923,
"grad_norm": 0.38303500413894653,
"learning_rate": 2.766955965735968e-05,
"loss": 41.3185,
"step": 133
},
{
"epoch": 0.016508053835966614,
"grad_norm": 0.3494269847869873,
"learning_rate": 2.693294185106562e-05,
"loss": 41.302,
"step": 134
},
{
"epoch": 0.016631248267578305,
"grad_norm": 0.30082157254219055,
"learning_rate": 2.6202630348146324e-05,
"loss": 41.3081,
"step": 135
},
{
"epoch": 0.016754442699189995,
"grad_norm": 0.3740646541118622,
"learning_rate": 2.547882480847461e-05,
"loss": 41.3094,
"step": 136
},
{
"epoch": 0.016877637130801686,
"grad_norm": 0.36229270696640015,
"learning_rate": 2.476172311325783e-05,
"loss": 41.3087,
"step": 137
},
{
"epoch": 0.017000831562413377,
"grad_norm": 0.36643633246421814,
"learning_rate": 2.405152131093926e-05,
"loss": 41.2834,
"step": 138
},
{
"epoch": 0.01712402599402507,
"grad_norm": 0.356387197971344,
"learning_rate": 2.3348413563600325e-05,
"loss": 41.3077,
"step": 139
},
{
"epoch": 0.017247220425636763,
"grad_norm": 0.36646512150764465,
"learning_rate": 2.2652592093878666e-05,
"loss": 41.2882,
"step": 140
},
{
"epoch": 0.017370414857248453,
"grad_norm": 0.2967137098312378,
"learning_rate": 2.196424713241637e-05,
"loss": 41.3225,
"step": 141
},
{
"epoch": 0.017493609288860144,
"grad_norm": 0.3648103177547455,
"learning_rate": 2.128356686585282e-05,
"loss": 41.3174,
"step": 142
},
{
"epoch": 0.017616803720471835,
"grad_norm": 0.37364527583122253,
"learning_rate": 2.061073738537635e-05,
"loss": 41.2795,
"step": 143
},
{
"epoch": 0.017739998152083526,
"grad_norm": 0.31925126910209656,
"learning_rate": 1.9945942635848748e-05,
"loss": 41.2903,
"step": 144
},
{
"epoch": 0.017863192583695217,
"grad_norm": 0.40006139874458313,
"learning_rate": 1.928936436551661e-05,
"loss": 41.2956,
"step": 145
},
{
"epoch": 0.017986387015306908,
"grad_norm": 0.4049123227596283,
"learning_rate": 1.8641182076323148e-05,
"loss": 41.2931,
"step": 146
},
{
"epoch": 0.0181095814469186,
"grad_norm": 0.38992005586624146,
"learning_rate": 1.800157297483417e-05,
"loss": 41.3238,
"step": 147
},
{
"epoch": 0.01823277587853029,
"grad_norm": 0.3740057945251465,
"learning_rate": 1.7370711923791567e-05,
"loss": 41.2797,
"step": 148
},
{
"epoch": 0.01835597031014198,
"grad_norm": 0.40869832038879395,
"learning_rate": 1.6748771394307585e-05,
"loss": 41.3091,
"step": 149
},
{
"epoch": 0.01847916474175367,
"grad_norm": 0.38570329546928406,
"learning_rate": 1.6135921418712956e-05,
"loss": 41.295,
"step": 150
},
{
"epoch": 0.01847916474175367,
"eval_loss": 10.32783031463623,
"eval_runtime": 28.686,
"eval_samples_per_second": 476.574,
"eval_steps_per_second": 119.152,
"step": 150
},
{
"epoch": 0.018602359173365363,
"grad_norm": 0.29322147369384766,
"learning_rate": 1.553232954407171e-05,
"loss": 41.3261,
"step": 151
},
{
"epoch": 0.018725553604977054,
"grad_norm": 0.3674944043159485,
"learning_rate": 1.4938160786375572e-05,
"loss": 41.326,
"step": 152
},
{
"epoch": 0.018848748036588748,
"grad_norm": 0.34021836519241333,
"learning_rate": 1.435357758543015e-05,
"loss": 41.3166,
"step": 153
},
{
"epoch": 0.01897194246820044,
"grad_norm": 0.4250488579273224,
"learning_rate": 1.3778739760445552e-05,
"loss": 41.324,
"step": 154
},
{
"epoch": 0.01909513689981213,
"grad_norm": 0.28842154145240784,
"learning_rate": 1.3213804466343421e-05,
"loss": 41.309,
"step": 155
},
{
"epoch": 0.01921833133142382,
"grad_norm": 0.37146052718162537,
"learning_rate": 1.2658926150792322e-05,
"loss": 41.3095,
"step": 156
},
{
"epoch": 0.01934152576303551,
"grad_norm": 0.3950320780277252,
"learning_rate": 1.2114256511983274e-05,
"loss": 41.3228,
"step": 157
},
{
"epoch": 0.019464720194647202,
"grad_norm": 0.3732362389564514,
"learning_rate": 1.157994445715706e-05,
"loss": 41.3206,
"step": 158
},
{
"epoch": 0.019587914626258893,
"grad_norm": 0.32746651768684387,
"learning_rate": 1.1056136061894384e-05,
"loss": 41.3418,
"step": 159
},
{
"epoch": 0.019711109057870584,
"grad_norm": 0.40929681062698364,
"learning_rate": 1.0542974530180327e-05,
"loss": 41.3331,
"step": 160
},
{
"epoch": 0.019834303489482275,
"grad_norm": 0.3719869554042816,
"learning_rate": 1.0040600155253765e-05,
"loss": 41.3383,
"step": 161
},
{
"epoch": 0.019957497921093966,
"grad_norm": 0.373146116733551,
"learning_rate": 9.549150281252633e-06,
"loss": 41.3259,
"step": 162
},
{
"epoch": 0.020080692352705657,
"grad_norm": 0.4307291805744171,
"learning_rate": 9.068759265665384e-06,
"loss": 41.34,
"step": 163
},
{
"epoch": 0.020203886784317348,
"grad_norm": 0.3563025891780853,
"learning_rate": 8.599558442598998e-06,
"loss": 41.3077,
"step": 164
},
{
"epoch": 0.02032708121592904,
"grad_norm": 0.2874586880207062,
"learning_rate": 8.141676086873572e-06,
"loss": 41.3375,
"step": 165
},
{
"epoch": 0.02045027564754073,
"grad_norm": 0.4188411235809326,
"learning_rate": 7.695237378953223e-06,
"loss": 41.3221,
"step": 166
},
{
"epoch": 0.02057347007915242,
"grad_norm": 0.3838663697242737,
"learning_rate": 7.260364370723044e-06,
"loss": 41.3361,
"step": 167
},
{
"epoch": 0.020696664510764115,
"grad_norm": 0.31003084778785706,
"learning_rate": 6.837175952121306e-06,
"loss": 41.3145,
"step": 168
},
{
"epoch": 0.020819858942375806,
"grad_norm": 0.35382720828056335,
"learning_rate": 6.425787818636131e-06,
"loss": 41.3053,
"step": 169
},
{
"epoch": 0.020943053373987497,
"grad_norm": 0.3113577663898468,
"learning_rate": 6.026312439675552e-06,
"loss": 41.34,
"step": 170
},
{
"epoch": 0.021066247805599188,
"grad_norm": 0.32111984491348267,
"learning_rate": 5.6388590278194096e-06,
"loss": 41.331,
"step": 171
},
{
"epoch": 0.02118944223721088,
"grad_norm": 0.3550707995891571,
"learning_rate": 5.263533508961827e-06,
"loss": 41.3191,
"step": 172
},
{
"epoch": 0.02131263666882257,
"grad_norm": 0.3614715337753296,
"learning_rate": 4.900438493352055e-06,
"loss": 41.2958,
"step": 173
},
{
"epoch": 0.02143583110043426,
"grad_norm": 0.4031304121017456,
"learning_rate": 4.549673247541875e-06,
"loss": 41.3089,
"step": 174
},
{
"epoch": 0.02155902553204595,
"grad_norm": 0.4373549818992615,
"learning_rate": 4.2113336672471245e-06,
"loss": 41.2899,
"step": 175
},
{
"epoch": 0.021682219963657642,
"grad_norm": 0.3327663838863373,
"learning_rate": 3.885512251130763e-06,
"loss": 41.3155,
"step": 176
},
{
"epoch": 0.021805414395269333,
"grad_norm": 0.31886810064315796,
"learning_rate": 3.5722980755146517e-06,
"loss": 41.2987,
"step": 177
},
{
"epoch": 0.021928608826881024,
"grad_norm": 0.36802637577056885,
"learning_rate": 3.271776770026963e-06,
"loss": 41.3308,
"step": 178
},
{
"epoch": 0.022051803258492715,
"grad_norm": 0.3171312212944031,
"learning_rate": 2.9840304941919415e-06,
"loss": 41.3021,
"step": 179
},
{
"epoch": 0.022174997690104406,
"grad_norm": 0.32209452986717224,
"learning_rate": 2.7091379149682685e-06,
"loss": 41.3092,
"step": 180
},
{
"epoch": 0.022298192121716097,
"grad_norm": 0.35692593455314636,
"learning_rate": 2.4471741852423237e-06,
"loss": 41.3095,
"step": 181
},
{
"epoch": 0.022421386553327788,
"grad_norm": 0.34302806854248047,
"learning_rate": 2.1982109232821178e-06,
"loss": 41.3083,
"step": 182
},
{
"epoch": 0.022544580984939482,
"grad_norm": 0.3289535939693451,
"learning_rate": 1.962316193157593e-06,
"loss": 41.334,
"step": 183
},
{
"epoch": 0.022667775416551173,
"grad_norm": 0.4378049671649933,
"learning_rate": 1.7395544861325718e-06,
"loss": 41.3029,
"step": 184
},
{
"epoch": 0.022790969848162864,
"grad_norm": 0.3344568610191345,
"learning_rate": 1.5299867030334814e-06,
"loss": 41.3138,
"step": 185
},
{
"epoch": 0.022914164279774555,
"grad_norm": 0.34980833530426025,
"learning_rate": 1.333670137599713e-06,
"loss": 41.3044,
"step": 186
},
{
"epoch": 0.023037358711386246,
"grad_norm": 0.3678795397281647,
"learning_rate": 1.1506584608200367e-06,
"loss": 41.2976,
"step": 187
},
{
"epoch": 0.023160553142997937,
"grad_norm": 0.4266866147518158,
"learning_rate": 9.810017062595322e-07,
"loss": 41.3177,
"step": 188
},
{
"epoch": 0.023283747574609628,
"grad_norm": 0.35492751002311707,
"learning_rate": 8.247462563808817e-07,
"loss": 41.3067,
"step": 189
},
{
"epoch": 0.02340694200622132,
"grad_norm": 0.4533149003982544,
"learning_rate": 6.819348298638839e-07,
"loss": 41.2824,
"step": 190
},
{
"epoch": 0.02353013643783301,
"grad_norm": 0.3437725305557251,
"learning_rate": 5.526064699265753e-07,
"loss": 41.2956,
"step": 191
},
{
"epoch": 0.0236533308694447,
"grad_norm": 0.43365609645843506,
"learning_rate": 4.367965336512403e-07,
"loss": 41.3099,
"step": 192
},
{
"epoch": 0.02377652530105639,
"grad_norm": 0.3725466728210449,
"learning_rate": 3.3453668231809286e-07,
"loss": 41.2875,
"step": 193
},
{
"epoch": 0.023899719732668082,
"grad_norm": 0.3781149685382843,
"learning_rate": 2.458548727494292e-07,
"loss": 41.308,
"step": 194
},
{
"epoch": 0.024022914164279773,
"grad_norm": 0.3700525164604187,
"learning_rate": 1.7077534966650766e-07,
"loss": 41.3202,
"step": 195
},
{
"epoch": 0.024146108595891464,
"grad_norm": 0.315239280462265,
"learning_rate": 1.0931863906127327e-07,
"loss": 41.287,
"step": 196
},
{
"epoch": 0.02426930302750316,
"grad_norm": 0.3573191463947296,
"learning_rate": 6.150154258476315e-08,
"loss": 41.2788,
"step": 197
},
{
"epoch": 0.02439249745911485,
"grad_norm": 0.32416683435440063,
"learning_rate": 2.7337132953697554e-08,
"loss": 41.3028,
"step": 198
},
{
"epoch": 0.02451569189072654,
"grad_norm": 0.4037030041217804,
"learning_rate": 6.834750376549792e-09,
"loss": 41.285,
"step": 199
},
{
"epoch": 0.02463888632233823,
"grad_norm": 0.4670313894748688,
"learning_rate": 0.0,
"loss": 41.2808,
"step": 200
},
{
"epoch": 0.02463888632233823,
"eval_loss": 10.32762336730957,
"eval_runtime": 28.6514,
"eval_samples_per_second": 477.15,
"eval_steps_per_second": 119.296,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 43176645623808.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}