{
"best_metric": 1.9855155944824219,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.011819981679028398,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 5.909990839514199e-05,
"grad_norm": 14.701292037963867,
"learning_rate": 3.3333333333333333e-06,
"loss": 4.2367,
"step": 1
},
{
"epoch": 5.909990839514199e-05,
"eval_loss": 5.2180633544921875,
"eval_runtime": 685.1037,
"eval_samples_per_second": 41.597,
"eval_steps_per_second": 20.798,
"step": 1
},
{
"epoch": 0.00011819981679028398,
"grad_norm": 17.779050827026367,
"learning_rate": 6.666666666666667e-06,
"loss": 4.8007,
"step": 2
},
{
"epoch": 0.00017729972518542596,
"grad_norm": 15.671966552734375,
"learning_rate": 1e-05,
"loss": 4.2807,
"step": 3
},
{
"epoch": 0.00023639963358056796,
"grad_norm": 15.129173278808594,
"learning_rate": 1.3333333333333333e-05,
"loss": 4.8199,
"step": 4
},
{
"epoch": 0.00029549954197570993,
"grad_norm": 13.176082611083984,
"learning_rate": 1.6666666666666667e-05,
"loss": 4.1705,
"step": 5
},
{
"epoch": 0.0003545994503708519,
"grad_norm": 15.571830749511719,
"learning_rate": 2e-05,
"loss": 4.6829,
"step": 6
},
{
"epoch": 0.0004136993587659939,
"grad_norm": 14.50023078918457,
"learning_rate": 2.3333333333333336e-05,
"loss": 4.5119,
"step": 7
},
{
"epoch": 0.0004727992671611359,
"grad_norm": 14.558034896850586,
"learning_rate": 2.6666666666666667e-05,
"loss": 4.1539,
"step": 8
},
{
"epoch": 0.0005318991755562779,
"grad_norm": 10.521626472473145,
"learning_rate": 3e-05,
"loss": 3.6708,
"step": 9
},
{
"epoch": 0.0005909990839514199,
"grad_norm": 10.474567413330078,
"learning_rate": 3.3333333333333335e-05,
"loss": 3.6137,
"step": 10
},
{
"epoch": 0.0006500989923465619,
"grad_norm": 8.635924339294434,
"learning_rate": 3.6666666666666666e-05,
"loss": 3.2574,
"step": 11
},
{
"epoch": 0.0007091989007417038,
"grad_norm": 13.226350784301758,
"learning_rate": 4e-05,
"loss": 3.3157,
"step": 12
},
{
"epoch": 0.0007682988091368458,
"grad_norm": 8.407894134521484,
"learning_rate": 4.3333333333333334e-05,
"loss": 3.3735,
"step": 13
},
{
"epoch": 0.0008273987175319878,
"grad_norm": 7.1230974197387695,
"learning_rate": 4.666666666666667e-05,
"loss": 3.0306,
"step": 14
},
{
"epoch": 0.0008864986259271298,
"grad_norm": 7.134531021118164,
"learning_rate": 5e-05,
"loss": 2.9162,
"step": 15
},
{
"epoch": 0.0009455985343222718,
"grad_norm": 6.953376770019531,
"learning_rate": 5.333333333333333e-05,
"loss": 2.6598,
"step": 16
},
{
"epoch": 0.0010046984427174138,
"grad_norm": 6.143302917480469,
"learning_rate": 5.666666666666667e-05,
"loss": 2.7871,
"step": 17
},
{
"epoch": 0.0010637983511125558,
"grad_norm": 5.899278163909912,
"learning_rate": 6e-05,
"loss": 2.8841,
"step": 18
},
{
"epoch": 0.0011228982595076977,
"grad_norm": 5.721312999725342,
"learning_rate": 6.333333333333333e-05,
"loss": 2.5476,
"step": 19
},
{
"epoch": 0.0011819981679028397,
"grad_norm": 5.896458625793457,
"learning_rate": 6.666666666666667e-05,
"loss": 2.5949,
"step": 20
},
{
"epoch": 0.0012410980762979818,
"grad_norm": 5.44040584564209,
"learning_rate": 7e-05,
"loss": 2.3684,
"step": 21
},
{
"epoch": 0.0013001979846931238,
"grad_norm": 5.645882606506348,
"learning_rate": 7.333333333333333e-05,
"loss": 2.5693,
"step": 22
},
{
"epoch": 0.0013592978930882656,
"grad_norm": 5.0001606941223145,
"learning_rate": 7.666666666666667e-05,
"loss": 2.1486,
"step": 23
},
{
"epoch": 0.0014183978014834077,
"grad_norm": 5.0876240730285645,
"learning_rate": 8e-05,
"loss": 2.29,
"step": 24
},
{
"epoch": 0.0014774977098785497,
"grad_norm": 5.2396368980407715,
"learning_rate": 8.333333333333334e-05,
"loss": 2.1141,
"step": 25
},
{
"epoch": 0.0015365976182736916,
"grad_norm": 5.895060062408447,
"learning_rate": 8.666666666666667e-05,
"loss": 2.3359,
"step": 26
},
{
"epoch": 0.0015956975266688336,
"grad_norm": 5.065176486968994,
"learning_rate": 9e-05,
"loss": 2.0715,
"step": 27
},
{
"epoch": 0.0016547974350639757,
"grad_norm": 4.62436056137085,
"learning_rate": 9.333333333333334e-05,
"loss": 2.1005,
"step": 28
},
{
"epoch": 0.0017138973434591177,
"grad_norm": 5.012561798095703,
"learning_rate": 9.666666666666667e-05,
"loss": 2.2374,
"step": 29
},
{
"epoch": 0.0017729972518542596,
"grad_norm": 3.960059642791748,
"learning_rate": 0.0001,
"loss": 1.9641,
"step": 30
},
{
"epoch": 0.0018320971602494016,
"grad_norm": 4.35403299331665,
"learning_rate": 9.999146252290264e-05,
"loss": 1.9797,
"step": 31
},
{
"epoch": 0.0018911970686445437,
"grad_norm": 4.6017985343933105,
"learning_rate": 9.996585300715116e-05,
"loss": 2.3441,
"step": 32
},
{
"epoch": 0.0019502969770396855,
"grad_norm": 4.478118419647217,
"learning_rate": 9.99231801983717e-05,
"loss": 1.8137,
"step": 33
},
{
"epoch": 0.0020093968854348276,
"grad_norm": 4.417632102966309,
"learning_rate": 9.986345866928941e-05,
"loss": 2.0547,
"step": 34
},
{
"epoch": 0.0020684967938299696,
"grad_norm": 5.099286079406738,
"learning_rate": 9.978670881475172e-05,
"loss": 2.1801,
"step": 35
},
{
"epoch": 0.0021275967022251117,
"grad_norm": 5.606115341186523,
"learning_rate": 9.96929568447637e-05,
"loss": 2.2527,
"step": 36
},
{
"epoch": 0.0021866966106202537,
"grad_norm": 4.479214668273926,
"learning_rate": 9.958223477553714e-05,
"loss": 2.1026,
"step": 37
},
{
"epoch": 0.0022457965190153953,
"grad_norm": 4.82413911819458,
"learning_rate": 9.94545804185573e-05,
"loss": 1.9819,
"step": 38
},
{
"epoch": 0.0023048964274105374,
"grad_norm": 4.534383773803711,
"learning_rate": 9.931003736767013e-05,
"loss": 1.8833,
"step": 39
},
{
"epoch": 0.0023639963358056794,
"grad_norm": 4.473276615142822,
"learning_rate": 9.91486549841951e-05,
"loss": 2.3477,
"step": 40
},
{
"epoch": 0.0024230962442008215,
"grad_norm": 4.184545993804932,
"learning_rate": 9.89704883800683e-05,
"loss": 2.4562,
"step": 41
},
{
"epoch": 0.0024821961525959635,
"grad_norm": 4.254863739013672,
"learning_rate": 9.877559839902184e-05,
"loss": 2.0597,
"step": 42
},
{
"epoch": 0.0025412960609911056,
"grad_norm": 3.5156869888305664,
"learning_rate": 9.85640515958057e-05,
"loss": 2.0459,
"step": 43
},
{
"epoch": 0.0026003959693862476,
"grad_norm": 3.2430365085601807,
"learning_rate": 9.833592021345937e-05,
"loss": 1.9904,
"step": 44
},
{
"epoch": 0.0026594958777813892,
"grad_norm": 4.73977518081665,
"learning_rate": 9.809128215864097e-05,
"loss": 2.048,
"step": 45
},
{
"epoch": 0.0027185957861765313,
"grad_norm": 3.6739819049835205,
"learning_rate": 9.783022097502204e-05,
"loss": 2.0881,
"step": 46
},
{
"epoch": 0.0027776956945716733,
"grad_norm": 3.2717761993408203,
"learning_rate": 9.755282581475769e-05,
"loss": 2.0992,
"step": 47
},
{
"epoch": 0.0028367956029668154,
"grad_norm": 3.5809576511383057,
"learning_rate": 9.725919140804099e-05,
"loss": 2.0799,
"step": 48
},
{
"epoch": 0.0028958955113619574,
"grad_norm": 4.07869815826416,
"learning_rate": 9.694941803075283e-05,
"loss": 1.9132,
"step": 49
},
{
"epoch": 0.0029549954197570995,
"grad_norm": 3.9852514266967773,
"learning_rate": 9.662361147021779e-05,
"loss": 1.6873,
"step": 50
},
{
"epoch": 0.0029549954197570995,
"eval_loss": 2.1213467121124268,
"eval_runtime": 683.4663,
"eval_samples_per_second": 41.696,
"eval_steps_per_second": 20.848,
"step": 50
},
{
"epoch": 0.0030140953281522415,
"grad_norm": 5.36259126663208,
"learning_rate": 9.628188298907782e-05,
"loss": 2.25,
"step": 51
},
{
"epoch": 0.003073195236547383,
"grad_norm": 4.400323867797852,
"learning_rate": 9.592434928729616e-05,
"loss": 2.3144,
"step": 52
},
{
"epoch": 0.003132295144942525,
"grad_norm": 4.434739112854004,
"learning_rate": 9.555113246230442e-05,
"loss": 2.2359,
"step": 53
},
{
"epoch": 0.0031913950533376673,
"grad_norm": 3.8840668201446533,
"learning_rate": 9.516235996730645e-05,
"loss": 2.1431,
"step": 54
},
{
"epoch": 0.0032504949617328093,
"grad_norm": 3.4554171562194824,
"learning_rate": 9.475816456775313e-05,
"loss": 2.3377,
"step": 55
},
{
"epoch": 0.0033095948701279514,
"grad_norm": 3.1983530521392822,
"learning_rate": 9.43386842960031e-05,
"loss": 2.0718,
"step": 56
},
{
"epoch": 0.0033686947785230934,
"grad_norm": 3.158194065093994,
"learning_rate": 9.39040624041849e-05,
"loss": 2.1517,
"step": 57
},
{
"epoch": 0.0034277946869182355,
"grad_norm": 3.890373468399048,
"learning_rate": 9.345444731527642e-05,
"loss": 2.441,
"step": 58
},
{
"epoch": 0.003486894595313377,
"grad_norm": 3.167813777923584,
"learning_rate": 9.298999257241863e-05,
"loss": 2.2091,
"step": 59
},
{
"epoch": 0.003545994503708519,
"grad_norm": 3.1658525466918945,
"learning_rate": 9.251085678648072e-05,
"loss": 2.0947,
"step": 60
},
{
"epoch": 0.003605094412103661,
"grad_norm": 3.408245325088501,
"learning_rate": 9.201720358189464e-05,
"loss": 2.2448,
"step": 61
},
{
"epoch": 0.0036641943204988032,
"grad_norm": 3.817218542098999,
"learning_rate": 9.150920154077754e-05,
"loss": 2.2507,
"step": 62
},
{
"epoch": 0.0037232942288939453,
"grad_norm": 3.1942789554595947,
"learning_rate": 9.098702414536107e-05,
"loss": 1.9487,
"step": 63
},
{
"epoch": 0.0037823941372890873,
"grad_norm": 4.15613317489624,
"learning_rate": 9.045084971874738e-05,
"loss": 2.2011,
"step": 64
},
{
"epoch": 0.0038414940456842294,
"grad_norm": 3.4071335792541504,
"learning_rate": 8.9900861364012e-05,
"loss": 1.9582,
"step": 65
},
{
"epoch": 0.003900593954079371,
"grad_norm": 3.021066665649414,
"learning_rate": 8.933724690167417e-05,
"loss": 2.3522,
"step": 66
},
{
"epoch": 0.003959693862474513,
"grad_norm": 3.616442918777466,
"learning_rate": 8.876019880555649e-05,
"loss": 2.098,
"step": 67
},
{
"epoch": 0.004018793770869655,
"grad_norm": 2.8962440490722656,
"learning_rate": 8.816991413705516e-05,
"loss": 2.0984,
"step": 68
},
{
"epoch": 0.004077893679264797,
"grad_norm": 3.579745292663574,
"learning_rate": 8.756659447784368e-05,
"loss": 2.0251,
"step": 69
},
{
"epoch": 0.004136993587659939,
"grad_norm": 3.775783061981201,
"learning_rate": 8.695044586103296e-05,
"loss": 2.119,
"step": 70
},
{
"epoch": 0.004196093496055081,
"grad_norm": 3.080296754837036,
"learning_rate": 8.632167870081121e-05,
"loss": 2.2573,
"step": 71
},
{
"epoch": 0.004255193404450223,
"grad_norm": 2.8051774501800537,
"learning_rate": 8.568050772058762e-05,
"loss": 1.9605,
"step": 72
},
{
"epoch": 0.004314293312845365,
"grad_norm": 2.691725730895996,
"learning_rate": 8.502715187966455e-05,
"loss": 1.9226,
"step": 73
},
{
"epoch": 0.004373393221240507,
"grad_norm": 2.9309446811676025,
"learning_rate": 8.436183429846313e-05,
"loss": 1.8635,
"step": 74
},
{
"epoch": 0.0044324931296356495,
"grad_norm": 3.0920376777648926,
"learning_rate": 8.368478218232787e-05,
"loss": 2.3189,
"step": 75
},
{
"epoch": 0.004491593038030791,
"grad_norm": 3.608562707901001,
"learning_rate": 8.299622674393614e-05,
"loss": 2.1573,
"step": 76
},
{
"epoch": 0.004550692946425933,
"grad_norm": 3.4357681274414062,
"learning_rate": 8.229640312433937e-05,
"loss": 2.3833,
"step": 77
},
{
"epoch": 0.004609792854821075,
"grad_norm": 3.2600772380828857,
"learning_rate": 8.158555031266254e-05,
"loss": 1.8943,
"step": 78
},
{
"epoch": 0.004668892763216217,
"grad_norm": 3.176687002182007,
"learning_rate": 8.086391106448965e-05,
"loss": 1.8515,
"step": 79
},
{
"epoch": 0.004727992671611359,
"grad_norm": 3.3577382564544678,
"learning_rate": 8.013173181896283e-05,
"loss": 1.8858,
"step": 80
},
{
"epoch": 0.004787092580006501,
"grad_norm": 3.1467320919036865,
"learning_rate": 7.938926261462366e-05,
"loss": 2.0871,
"step": 81
},
{
"epoch": 0.004846192488401643,
"grad_norm": 2.7516732215881348,
"learning_rate": 7.863675700402526e-05,
"loss": 1.8316,
"step": 82
},
{
"epoch": 0.004905292396796785,
"grad_norm": 3.2467401027679443,
"learning_rate": 7.787447196714427e-05,
"loss": 2.0702,
"step": 83
},
{
"epoch": 0.004964392305191927,
"grad_norm": 3.381272315979004,
"learning_rate": 7.710266782362247e-05,
"loss": 2.2569,
"step": 84
},
{
"epoch": 0.005023492213587069,
"grad_norm": 2.826927661895752,
"learning_rate": 7.63216081438678e-05,
"loss": 2.1583,
"step": 85
},
{
"epoch": 0.005082592121982211,
"grad_norm": 3.259561061859131,
"learning_rate": 7.553155965904535e-05,
"loss": 1.9269,
"step": 86
},
{
"epoch": 0.005141692030377353,
"grad_norm": 3.7768051624298096,
"learning_rate": 7.473279216998895e-05,
"loss": 2.362,
"step": 87
},
{
"epoch": 0.005200791938772495,
"grad_norm": 2.951033592224121,
"learning_rate": 7.392557845506432e-05,
"loss": 2.0554,
"step": 88
},
{
"epoch": 0.005259891847167637,
"grad_norm": 2.699878454208374,
"learning_rate": 7.311019417701566e-05,
"loss": 1.6497,
"step": 89
},
{
"epoch": 0.0053189917555627785,
"grad_norm": 2.780122756958008,
"learning_rate": 7.228691778882693e-05,
"loss": 1.904,
"step": 90
},
{
"epoch": 0.0053780916639579205,
"grad_norm": 2.7070605754852295,
"learning_rate": 7.145603043863045e-05,
"loss": 1.8025,
"step": 91
},
{
"epoch": 0.005437191572353063,
"grad_norm": 2.740710973739624,
"learning_rate": 7.061781587369519e-05,
"loss": 1.9662,
"step": 92
},
{
"epoch": 0.005496291480748205,
"grad_norm": 3.0559332370758057,
"learning_rate": 6.977256034352712e-05,
"loss": 2.0185,
"step": 93
},
{
"epoch": 0.005555391389143347,
"grad_norm": 2.9242119789123535,
"learning_rate": 6.892055250211552e-05,
"loss": 2.0248,
"step": 94
},
{
"epoch": 0.005614491297538489,
"grad_norm": 2.762571334838867,
"learning_rate": 6.806208330935766e-05,
"loss": 1.7682,
"step": 95
},
{
"epoch": 0.005673591205933631,
"grad_norm": 2.9518630504608154,
"learning_rate": 6.719744593169641e-05,
"loss": 1.88,
"step": 96
},
{
"epoch": 0.005732691114328773,
"grad_norm": 2.976564645767212,
"learning_rate": 6.632693564200416e-05,
"loss": 1.8969,
"step": 97
},
{
"epoch": 0.005791791022723915,
"grad_norm": 2.815509796142578,
"learning_rate": 6.545084971874738e-05,
"loss": 1.972,
"step": 98
},
{
"epoch": 0.005850890931119057,
"grad_norm": 3.5287201404571533,
"learning_rate": 6.456948734446624e-05,
"loss": 2.125,
"step": 99
},
{
"epoch": 0.005909990839514199,
"grad_norm": 3.9216277599334717,
"learning_rate": 6.368314950360415e-05,
"loss": 1.9601,
"step": 100
},
{
"epoch": 0.005909990839514199,
"eval_loss": 2.066779136657715,
"eval_runtime": 683.3126,
"eval_samples_per_second": 41.706,
"eval_steps_per_second": 20.853,
"step": 100
},
{
"epoch": 0.005969090747909341,
"grad_norm": 4.6523542404174805,
"learning_rate": 6.279213887972179e-05,
"loss": 2.4316,
"step": 101
},
{
"epoch": 0.006028190656304483,
"grad_norm": 3.620638847351074,
"learning_rate": 6.189675975213094e-05,
"loss": 2.1098,
"step": 102
},
{
"epoch": 0.006087290564699625,
"grad_norm": 3.4210116863250732,
"learning_rate": 6.099731789198344e-05,
"loss": 2.4399,
"step": 103
},
{
"epoch": 0.006146390473094766,
"grad_norm": 3.1519083976745605,
"learning_rate": 6.009412045785051e-05,
"loss": 2.0912,
"step": 104
},
{
"epoch": 0.006205490381489908,
"grad_norm": 2.7077889442443848,
"learning_rate": 5.918747589082853e-05,
"loss": 2.0573,
"step": 105
},
{
"epoch": 0.00626459028988505,
"grad_norm": 3.0147182941436768,
"learning_rate": 5.82776938092065e-05,
"loss": 2.0726,
"step": 106
},
{
"epoch": 0.0063236901982801925,
"grad_norm": 3.317253351211548,
"learning_rate": 5.736508490273188e-05,
"loss": 1.97,
"step": 107
},
{
"epoch": 0.0063827901066753345,
"grad_norm": 3.3307950496673584,
"learning_rate": 5.644996082651017e-05,
"loss": 2.5252,
"step": 108
},
{
"epoch": 0.006441890015070477,
"grad_norm": 3.0444374084472656,
"learning_rate": 5.553263409457504e-05,
"loss": 2.2743,
"step": 109
},
{
"epoch": 0.006500989923465619,
"grad_norm": 2.982682704925537,
"learning_rate": 5.4613417973165106e-05,
"loss": 2.3016,
"step": 110
},
{
"epoch": 0.006560089831860761,
"grad_norm": 2.6371850967407227,
"learning_rate": 5.3692626373743706e-05,
"loss": 2.0164,
"step": 111
},
{
"epoch": 0.006619189740255903,
"grad_norm": 2.7763266563415527,
"learning_rate": 5.27705737457985e-05,
"loss": 2.0413,
"step": 112
},
{
"epoch": 0.006678289648651045,
"grad_norm": 3.169642448425293,
"learning_rate": 5.184757496945726e-05,
"loss": 1.9156,
"step": 113
},
{
"epoch": 0.006737389557046187,
"grad_norm": 2.8326120376586914,
"learning_rate": 5.092394524795649e-05,
"loss": 1.9181,
"step": 114
},
{
"epoch": 0.006796489465441329,
"grad_norm": 2.9549639225006104,
"learning_rate": 5e-05,
"loss": 2.2077,
"step": 115
},
{
"epoch": 0.006855589373836471,
"grad_norm": 2.7871503829956055,
"learning_rate": 4.907605475204352e-05,
"loss": 1.8016,
"step": 116
},
{
"epoch": 0.006914689282231612,
"grad_norm": 2.778618812561035,
"learning_rate": 4.8152425030542766e-05,
"loss": 2.1039,
"step": 117
},
{
"epoch": 0.006973789190626754,
"grad_norm": 3.4482669830322266,
"learning_rate": 4.72294262542015e-05,
"loss": 2.248,
"step": 118
},
{
"epoch": 0.007032889099021896,
"grad_norm": 2.6852567195892334,
"learning_rate": 4.6307373626256306e-05,
"loss": 1.8628,
"step": 119
},
{
"epoch": 0.007091989007417038,
"grad_norm": 2.594987154006958,
"learning_rate": 4.5386582026834906e-05,
"loss": 1.975,
"step": 120
},
{
"epoch": 0.00715108891581218,
"grad_norm": 3.144756555557251,
"learning_rate": 4.446736590542497e-05,
"loss": 2.2441,
"step": 121
},
{
"epoch": 0.007210188824207322,
"grad_norm": 2.6541213989257812,
"learning_rate": 4.3550039173489845e-05,
"loss": 2.1649,
"step": 122
},
{
"epoch": 0.007269288732602464,
"grad_norm": 2.779205083847046,
"learning_rate": 4.2634915097268115e-05,
"loss": 1.8455,
"step": 123
},
{
"epoch": 0.0073283886409976065,
"grad_norm": 2.9109785556793213,
"learning_rate": 4.1722306190793495e-05,
"loss": 1.7731,
"step": 124
},
{
"epoch": 0.0073874885493927485,
"grad_norm": 3.254918098449707,
"learning_rate": 4.0812524109171476e-05,
"loss": 2.2217,
"step": 125
},
{
"epoch": 0.007446588457787891,
"grad_norm": 3.0662970542907715,
"learning_rate": 3.99058795421495e-05,
"loss": 2.0891,
"step": 126
},
{
"epoch": 0.007505688366183033,
"grad_norm": 3.069082021713257,
"learning_rate": 3.9002682108016585e-05,
"loss": 1.9835,
"step": 127
},
{
"epoch": 0.007564788274578175,
"grad_norm": 2.814826726913452,
"learning_rate": 3.8103240247869075e-05,
"loss": 2.012,
"step": 128
},
{
"epoch": 0.007623888182973317,
"grad_norm": 2.723910093307495,
"learning_rate": 3.720786112027822e-05,
"loss": 2.1048,
"step": 129
},
{
"epoch": 0.007682988091368459,
"grad_norm": 2.600010395050049,
"learning_rate": 3.631685049639586e-05,
"loss": 1.6493,
"step": 130
},
{
"epoch": 0.0077420879997636,
"grad_norm": 4.922942638397217,
"learning_rate": 3.543051265553377e-05,
"loss": 2.025,
"step": 131
},
{
"epoch": 0.007801187908158742,
"grad_norm": 3.4721429347991943,
"learning_rate": 3.4549150281252636e-05,
"loss": 2.492,
"step": 132
},
{
"epoch": 0.007860287816553884,
"grad_norm": 2.9339911937713623,
"learning_rate": 3.367306435799584e-05,
"loss": 1.8878,
"step": 133
},
{
"epoch": 0.007919387724949026,
"grad_norm": 2.507004976272583,
"learning_rate": 3.2802554068303596e-05,
"loss": 1.6928,
"step": 134
},
{
"epoch": 0.007978487633344168,
"grad_norm": 2.646677017211914,
"learning_rate": 3.1937916690642356e-05,
"loss": 1.9123,
"step": 135
},
{
"epoch": 0.00803758754173931,
"grad_norm": 3.230515956878662,
"learning_rate": 3.107944749788449e-05,
"loss": 2.1234,
"step": 136
},
{
"epoch": 0.008096687450134452,
"grad_norm": 2.8240675926208496,
"learning_rate": 3.0227439656472877e-05,
"loss": 2.0608,
"step": 137
},
{
"epoch": 0.008155787358529594,
"grad_norm": 3.0936813354492188,
"learning_rate": 2.9382184126304834e-05,
"loss": 1.9963,
"step": 138
},
{
"epoch": 0.008214887266924736,
"grad_norm": 2.895601272583008,
"learning_rate": 2.8543969561369556e-05,
"loss": 1.9524,
"step": 139
},
{
"epoch": 0.008273987175319878,
"grad_norm": 2.7313430309295654,
"learning_rate": 2.771308221117309e-05,
"loss": 1.848,
"step": 140
},
{
"epoch": 0.00833308708371502,
"grad_norm": 2.8361904621124268,
"learning_rate": 2.688980582298435e-05,
"loss": 1.9554,
"step": 141
},
{
"epoch": 0.008392186992110163,
"grad_norm": 2.7524335384368896,
"learning_rate": 2.607442154493568e-05,
"loss": 1.9239,
"step": 142
},
{
"epoch": 0.008451286900505305,
"grad_norm": 3.083930492401123,
"learning_rate": 2.5267207830011068e-05,
"loss": 1.9522,
"step": 143
},
{
"epoch": 0.008510386808900447,
"grad_norm": 2.8543131351470947,
"learning_rate": 2.446844034095466e-05,
"loss": 1.9891,
"step": 144
},
{
"epoch": 0.008569486717295589,
"grad_norm": 2.8486130237579346,
"learning_rate": 2.3678391856132204e-05,
"loss": 2.052,
"step": 145
},
{
"epoch": 0.00862858662569073,
"grad_norm": 2.6929166316986084,
"learning_rate": 2.2897332176377528e-05,
"loss": 1.9629,
"step": 146
},
{
"epoch": 0.008687686534085873,
"grad_norm": 3.058091163635254,
"learning_rate": 2.2125528032855724e-05,
"loss": 1.9577,
"step": 147
},
{
"epoch": 0.008746786442481015,
"grad_norm": 3.2105419635772705,
"learning_rate": 2.136324299597474e-05,
"loss": 2.0027,
"step": 148
},
{
"epoch": 0.008805886350876157,
"grad_norm": 2.93302845954895,
"learning_rate": 2.061073738537635e-05,
"loss": 1.977,
"step": 149
},
{
"epoch": 0.008864986259271299,
"grad_norm": 3.1761159896850586,
"learning_rate": 1.9868268181037185e-05,
"loss": 1.8015,
"step": 150
},
{
"epoch": 0.008864986259271299,
"eval_loss": 2.0016629695892334,
"eval_runtime": 684.7232,
"eval_samples_per_second": 41.62,
"eval_steps_per_second": 20.81,
"step": 150
},
{
"epoch": 0.008924086167666441,
"grad_norm": 3.083522081375122,
"learning_rate": 1.9136088935510362e-05,
"loss": 1.8471,
"step": 151
},
{
"epoch": 0.008983186076061581,
"grad_norm": 3.249023914337158,
"learning_rate": 1.8414449687337464e-05,
"loss": 1.9572,
"step": 152
},
{
"epoch": 0.009042285984456723,
"grad_norm": 2.9980008602142334,
"learning_rate": 1.7703596875660645e-05,
"loss": 2.1264,
"step": 153
},
{
"epoch": 0.009101385892851865,
"grad_norm": 2.857516050338745,
"learning_rate": 1.700377325606388e-05,
"loss": 1.901,
"step": 154
},
{
"epoch": 0.009160485801247007,
"grad_norm": 3.0055689811706543,
"learning_rate": 1.631521781767214e-05,
"loss": 2.192,
"step": 155
},
{
"epoch": 0.00921958570964215,
"grad_norm": 2.5224833488464355,
"learning_rate": 1.5638165701536868e-05,
"loss": 1.9115,
"step": 156
},
{
"epoch": 0.009278685618037292,
"grad_norm": 2.467513084411621,
"learning_rate": 1.4972848120335453e-05,
"loss": 1.7274,
"step": 157
},
{
"epoch": 0.009337785526432434,
"grad_norm": 3.3824963569641113,
"learning_rate": 1.4319492279412388e-05,
"loss": 2.2169,
"step": 158
},
{
"epoch": 0.009396885434827576,
"grad_norm": 2.666517496109009,
"learning_rate": 1.3678321299188801e-05,
"loss": 2.1059,
"step": 159
},
{
"epoch": 0.009455985343222718,
"grad_norm": 3.0350444316864014,
"learning_rate": 1.3049554138967051e-05,
"loss": 2.3778,
"step": 160
},
{
"epoch": 0.00951508525161786,
"grad_norm": 2.6140732765197754,
"learning_rate": 1.2433405522156332e-05,
"loss": 1.8671,
"step": 161
},
{
"epoch": 0.009574185160013002,
"grad_norm": 2.8309125900268555,
"learning_rate": 1.183008586294485e-05,
"loss": 1.977,
"step": 162
},
{
"epoch": 0.009633285068408144,
"grad_norm": 3.1472816467285156,
"learning_rate": 1.1239801194443506e-05,
"loss": 2.4342,
"step": 163
},
{
"epoch": 0.009692384976803286,
"grad_norm": 2.7688024044036865,
"learning_rate": 1.066275309832584e-05,
"loss": 1.9787,
"step": 164
},
{
"epoch": 0.009751484885198428,
"grad_norm": 3.0642006397247314,
"learning_rate": 1.0099138635988026e-05,
"loss": 2.1242,
"step": 165
},
{
"epoch": 0.00981058479359357,
"grad_norm": 2.5731711387634277,
"learning_rate": 9.549150281252633e-06,
"loss": 2.0205,
"step": 166
},
{
"epoch": 0.009869684701988712,
"grad_norm": 2.6096303462982178,
"learning_rate": 9.012975854638949e-06,
"loss": 1.9473,
"step": 167
},
{
"epoch": 0.009928784610383854,
"grad_norm": 2.6382505893707275,
"learning_rate": 8.490798459222476e-06,
"loss": 1.9061,
"step": 168
},
{
"epoch": 0.009987884518778996,
"grad_norm": 3.020261526107788,
"learning_rate": 7.982796418105371e-06,
"loss": 2.3045,
"step": 169
},
{
"epoch": 0.010046984427174138,
"grad_norm": 2.3413009643554688,
"learning_rate": 7.489143213519301e-06,
"loss": 1.7087,
"step": 170
},
{
"epoch": 0.01010608433556928,
"grad_norm": 2.669962167739868,
"learning_rate": 7.010007427581378e-06,
"loss": 2.0177,
"step": 171
},
{
"epoch": 0.010165184243964422,
"grad_norm": 2.9112627506256104,
"learning_rate": 6.5455526847235825e-06,
"loss": 2.2063,
"step": 172
},
{
"epoch": 0.010224284152359564,
"grad_norm": 2.7621371746063232,
"learning_rate": 6.0959375958151045e-06,
"loss": 2.1102,
"step": 173
},
{
"epoch": 0.010283384060754706,
"grad_norm": 2.7470543384552,
"learning_rate": 5.6613157039969055e-06,
"loss": 1.9059,
"step": 174
},
{
"epoch": 0.010342483969149848,
"grad_norm": 2.68990159034729,
"learning_rate": 5.241835432246889e-06,
"loss": 2.0888,
"step": 175
},
{
"epoch": 0.01040158387754499,
"grad_norm": 2.371671676635742,
"learning_rate": 4.837640032693558e-06,
"loss": 1.6942,
"step": 176
},
{
"epoch": 0.010460683785940133,
"grad_norm": 2.7487049102783203,
"learning_rate": 4.448867537695578e-06,
"loss": 1.9798,
"step": 177
},
{
"epoch": 0.010519783694335275,
"grad_norm": 2.5755209922790527,
"learning_rate": 4.075650712703849e-06,
"loss": 1.8144,
"step": 178
},
{
"epoch": 0.010578883602730415,
"grad_norm": 2.740054130554199,
"learning_rate": 3.71811701092219e-06,
"loss": 1.9632,
"step": 179
},
{
"epoch": 0.010637983511125557,
"grad_norm": 2.762751340866089,
"learning_rate": 3.376388529782215e-06,
"loss": 2.0491,
"step": 180
},
{
"epoch": 0.010697083419520699,
"grad_norm": 2.6476433277130127,
"learning_rate": 3.0505819692471792e-06,
"loss": 2.0671,
"step": 181
},
{
"epoch": 0.010756183327915841,
"grad_norm": 3.5005996227264404,
"learning_rate": 2.7408085919590264e-06,
"loss": 2.2838,
"step": 182
},
{
"epoch": 0.010815283236310983,
"grad_norm": 2.557985305786133,
"learning_rate": 2.4471741852423237e-06,
"loss": 1.8317,
"step": 183
},
{
"epoch": 0.010874383144706125,
"grad_norm": 2.7409958839416504,
"learning_rate": 2.1697790249779636e-06,
"loss": 1.9403,
"step": 184
},
{
"epoch": 0.010933483053101267,
"grad_norm": 2.9567854404449463,
"learning_rate": 1.908717841359048e-06,
"loss": 2.0511,
"step": 185
},
{
"epoch": 0.01099258296149641,
"grad_norm": 3.5258986949920654,
"learning_rate": 1.6640797865406288e-06,
"loss": 2.3455,
"step": 186
},
{
"epoch": 0.011051682869891551,
"grad_norm": 2.719268560409546,
"learning_rate": 1.4359484041943038e-06,
"loss": 1.9524,
"step": 187
},
{
"epoch": 0.011110782778286693,
"grad_norm": 3.0413248538970947,
"learning_rate": 1.2244016009781701e-06,
"loss": 2.0141,
"step": 188
},
{
"epoch": 0.011169882686681835,
"grad_norm": 3.6312408447265625,
"learning_rate": 1.0295116199317057e-06,
"loss": 1.986,
"step": 189
},
{
"epoch": 0.011228982595076977,
"grad_norm": 2.7848613262176514,
"learning_rate": 8.513450158049108e-07,
"loss": 1.7297,
"step": 190
},
{
"epoch": 0.01128808250347212,
"grad_norm": 2.962815523147583,
"learning_rate": 6.899626323298713e-07,
"loss": 2.0131,
"step": 191
},
{
"epoch": 0.011347182411867262,
"grad_norm": 3.577907085418701,
"learning_rate": 5.454195814427021e-07,
"loss": 2.4659,
"step": 192
},
{
"epoch": 0.011406282320262404,
"grad_norm": 3.7039952278137207,
"learning_rate": 4.177652244628627e-07,
"loss": 2.0184,
"step": 193
},
{
"epoch": 0.011465382228657546,
"grad_norm": 3.0766096115112305,
"learning_rate": 3.0704315523631953e-07,
"loss": 1.9093,
"step": 194
},
{
"epoch": 0.011524482137052688,
"grad_norm": 2.9538846015930176,
"learning_rate": 2.1329118524827662e-07,
"loss": 1.9372,
"step": 195
},
{
"epoch": 0.01158358204544783,
"grad_norm": 3.276865005493164,
"learning_rate": 1.3654133071059893e-07,
"loss": 2.1478,
"step": 196
},
{
"epoch": 0.011642681953842972,
"grad_norm": 2.88531231880188,
"learning_rate": 7.681980162830282e-08,
"loss": 1.9631,
"step": 197
},
{
"epoch": 0.011701781862238114,
"grad_norm": 2.8093371391296387,
"learning_rate": 3.4146992848854695e-08,
"loss": 1.7216,
"step": 198
},
{
"epoch": 0.011760881770633256,
"grad_norm": 2.859862804412842,
"learning_rate": 8.537477097364522e-09,
"loss": 1.8161,
"step": 199
},
{
"epoch": 0.011819981679028398,
"grad_norm": 3.6518397331237793,
"learning_rate": 0.0,
"loss": 2.0347,
"step": 200
},
{
"epoch": 0.011819981679028398,
"eval_loss": 1.9855155944824219,
"eval_runtime": 684.4118,
"eval_samples_per_second": 41.639,
"eval_steps_per_second": 20.819,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.54569648635904e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}