{
"best_metric": null,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.010980564401010211,
"eval_steps": 50,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00010980564401010212,
"grad_norm": 0.005295205395668745,
"learning_rate": 1e-05,
"loss": 12.4581,
"step": 1
},
{
"epoch": 0.00010980564401010212,
"eval_loss": null,
"eval_runtime": 105.7149,
"eval_samples_per_second": 145.088,
"eval_steps_per_second": 36.277,
"step": 1
},
{
"epoch": 0.00021961128802020424,
"grad_norm": 0.005485564470291138,
"learning_rate": 2e-05,
"loss": 12.4572,
"step": 2
},
{
"epoch": 0.00032941693203030636,
"grad_norm": 0.004944846499711275,
"learning_rate": 3e-05,
"loss": 12.4557,
"step": 3
},
{
"epoch": 0.0004392225760404085,
"grad_norm": 0.006096905097365379,
"learning_rate": 4e-05,
"loss": 12.4568,
"step": 4
},
{
"epoch": 0.0005490282200505106,
"grad_norm": 0.005918541457504034,
"learning_rate": 5e-05,
"loss": 12.4486,
"step": 5
},
{
"epoch": 0.0006588338640606127,
"grad_norm": 0.008583322167396545,
"learning_rate": 6e-05,
"loss": 12.4563,
"step": 6
},
{
"epoch": 0.0007686395080707148,
"grad_norm": 0.007781935855746269,
"learning_rate": 7e-05,
"loss": 12.4584,
"step": 7
},
{
"epoch": 0.000878445152080817,
"grad_norm": 0.0075157927349209785,
"learning_rate": 8e-05,
"loss": 12.4541,
"step": 8
},
{
"epoch": 0.000988250796090919,
"grad_norm": 0.008060351014137268,
"learning_rate": 9e-05,
"loss": 12.454,
"step": 9
},
{
"epoch": 0.0010980564401010212,
"grad_norm": 0.009477085433900356,
"learning_rate": 0.0001,
"loss": 12.4544,
"step": 10
},
{
"epoch": 0.0012078620841111233,
"grad_norm": 0.00922788679599762,
"learning_rate": 9.999316524962345e-05,
"loss": 12.4502,
"step": 11
},
{
"epoch": 0.0013176677281212254,
"grad_norm": 0.011367438361048698,
"learning_rate": 9.997266286704631e-05,
"loss": 12.4525,
"step": 12
},
{
"epoch": 0.0014274733721313276,
"grad_norm": 0.010452193208038807,
"learning_rate": 9.993849845741524e-05,
"loss": 12.4521,
"step": 13
},
{
"epoch": 0.0015372790161414297,
"grad_norm": 0.012774151749908924,
"learning_rate": 9.989068136093873e-05,
"loss": 12.4511,
"step": 14
},
{
"epoch": 0.0016470846601515318,
"grad_norm": 0.012014582753181458,
"learning_rate": 9.98292246503335e-05,
"loss": 12.4485,
"step": 15
},
{
"epoch": 0.001756890304161634,
"grad_norm": 0.015250686556100845,
"learning_rate": 9.975414512725057e-05,
"loss": 12.4452,
"step": 16
},
{
"epoch": 0.001866695948171736,
"grad_norm": 0.013115638867020607,
"learning_rate": 9.966546331768191e-05,
"loss": 12.4534,
"step": 17
},
{
"epoch": 0.001976501592181838,
"grad_norm": 0.014557444490492344,
"learning_rate": 9.956320346634876e-05,
"loss": 12.4524,
"step": 18
},
{
"epoch": 0.00208630723619194,
"grad_norm": 0.015748385339975357,
"learning_rate": 9.944739353007344e-05,
"loss": 12.4496,
"step": 19
},
{
"epoch": 0.0021961128802020424,
"grad_norm": 0.01659594289958477,
"learning_rate": 9.931806517013612e-05,
"loss": 12.4684,
"step": 20
},
{
"epoch": 0.0023059185242121443,
"grad_norm": 0.019817043095827103,
"learning_rate": 9.917525374361912e-05,
"loss": 12.448,
"step": 21
},
{
"epoch": 0.0024157241682222466,
"grad_norm": 0.02010353095829487,
"learning_rate": 9.901899829374047e-05,
"loss": 12.4573,
"step": 22
},
{
"epoch": 0.0025255298122323485,
"grad_norm": 0.021140344440937042,
"learning_rate": 9.884934153917997e-05,
"loss": 12.4504,
"step": 23
},
{
"epoch": 0.002635335456242451,
"grad_norm": 0.03636853024363518,
"learning_rate": 9.86663298624003e-05,
"loss": 12.4552,
"step": 24
},
{
"epoch": 0.0027451411002525528,
"grad_norm": 0.024292292073369026,
"learning_rate": 9.847001329696653e-05,
"loss": 12.4455,
"step": 25
},
{
"epoch": 0.002854946744262655,
"grad_norm": 0.0418277382850647,
"learning_rate": 9.826044551386744e-05,
"loss": 12.4445,
"step": 26
},
{
"epoch": 0.002964752388272757,
"grad_norm": 0.03029468096792698,
"learning_rate": 9.803768380684242e-05,
"loss": 12.4518,
"step": 27
},
{
"epoch": 0.0030745580322828594,
"grad_norm": 0.03751302883028984,
"learning_rate": 9.780178907671789e-05,
"loss": 12.4348,
"step": 28
},
{
"epoch": 0.0031843636762929613,
"grad_norm": 0.03355684503912926,
"learning_rate": 9.755282581475769e-05,
"loss": 12.4521,
"step": 29
},
{
"epoch": 0.0032941693203030636,
"grad_norm": 0.055962417274713516,
"learning_rate": 9.729086208503174e-05,
"loss": 12.4396,
"step": 30
},
{
"epoch": 0.0034039749643131655,
"grad_norm": 0.05316566675901413,
"learning_rate": 9.701596950580806e-05,
"loss": 12.4507,
"step": 31
},
{
"epoch": 0.003513780608323268,
"grad_norm": 0.052443914115428925,
"learning_rate": 9.672822322997305e-05,
"loss": 12.4335,
"step": 32
},
{
"epoch": 0.0036235862523333697,
"grad_norm": 0.10784019529819489,
"learning_rate": 9.642770192448536e-05,
"loss": 12.4565,
"step": 33
},
{
"epoch": 0.003733391896343472,
"grad_norm": 0.09334560483694077,
"learning_rate": 9.611448774886924e-05,
"loss": 12.4436,
"step": 34
},
{
"epoch": 0.003843197540353574,
"grad_norm": 0.08733153343200684,
"learning_rate": 9.578866633275288e-05,
"loss": 12.4456,
"step": 35
},
{
"epoch": 0.003953003184363676,
"grad_norm": 0.09157176315784454,
"learning_rate": 9.545032675245813e-05,
"loss": 12.4515,
"step": 36
},
{
"epoch": 0.004062808828373779,
"grad_norm": 0.10120932757854462,
"learning_rate": 9.509956150664796e-05,
"loss": 12.452,
"step": 37
},
{
"epoch": 0.00417261447238388,
"grad_norm": 0.1628744900226593,
"learning_rate": 9.473646649103818e-05,
"loss": 12.4513,
"step": 38
},
{
"epoch": 0.0042824201163939824,
"grad_norm": 0.20253495872020721,
"learning_rate": 9.43611409721806e-05,
"loss": 12.459,
"step": 39
},
{
"epoch": 0.004392225760404085,
"grad_norm": 0.1669420748949051,
"learning_rate": 9.397368756032445e-05,
"loss": 12.4468,
"step": 40
},
{
"epoch": 0.004502031404414187,
"grad_norm": 0.1850309669971466,
"learning_rate": 9.357421218136386e-05,
"loss": 12.4537,
"step": 41
},
{
"epoch": 0.004611837048424289,
"grad_norm": 0.1741914004087448,
"learning_rate": 9.316282404787871e-05,
"loss": 12.4632,
"step": 42
},
{
"epoch": 0.004721642692434391,
"grad_norm": 0.23635537922382355,
"learning_rate": 9.273963562927695e-05,
"loss": 12.4505,
"step": 43
},
{
"epoch": 0.004831448336444493,
"grad_norm": 0.1862466037273407,
"learning_rate": 9.230476262104677e-05,
"loss": 12.4435,
"step": 44
},
{
"epoch": 0.004941253980454596,
"grad_norm": 0.3158741295337677,
"learning_rate": 9.185832391312644e-05,
"loss": 12.4488,
"step": 45
},
{
"epoch": 0.005051059624464697,
"grad_norm": 0.2830805778503418,
"learning_rate": 9.140044155740101e-05,
"loss": 12.4452,
"step": 46
},
{
"epoch": 0.005160865268474799,
"grad_norm": 0.31400245428085327,
"learning_rate": 9.093124073433463e-05,
"loss": 12.4199,
"step": 47
},
{
"epoch": 0.005270670912484902,
"grad_norm": 0.40273311734199524,
"learning_rate": 9.045084971874738e-05,
"loss": 12.4289,
"step": 48
},
{
"epoch": 0.005380476556495004,
"grad_norm": 0.48538336157798767,
"learning_rate": 8.995939984474624e-05,
"loss": 12.3998,
"step": 49
},
{
"epoch": 0.0054902822005051055,
"grad_norm": 0.0,
"learning_rate": 8.945702546981969e-05,
"loss": 0.0,
"step": 50
},
{
"epoch": 0.0054902822005051055,
"eval_loss": null,
"eval_runtime": 105.2599,
"eval_samples_per_second": 145.715,
"eval_steps_per_second": 36.434,
"step": 50
},
{
"epoch": 0.005600087844515208,
"grad_norm": 0.010938960127532482,
"learning_rate": 8.894386393810563e-05,
"loss": 12.4581,
"step": 51
},
{
"epoch": 0.00570989348852531,
"grad_norm": 0.014923685230314732,
"learning_rate": 8.842005554284296e-05,
"loss": 12.4501,
"step": 52
},
{
"epoch": 0.0058196991325354126,
"grad_norm": 0.01725936494767666,
"learning_rate": 8.788574348801675e-05,
"loss": 12.4562,
"step": 53
},
{
"epoch": 0.005929504776545514,
"grad_norm": 0.019850585609674454,
"learning_rate": 8.73410738492077e-05,
"loss": 12.4529,
"step": 54
},
{
"epoch": 0.006039310420555616,
"grad_norm": 0.020352356135845184,
"learning_rate": 8.678619553365659e-05,
"loss": 12.4549,
"step": 55
},
{
"epoch": 0.006149116064565719,
"grad_norm": 0.023946423083543777,
"learning_rate": 8.622126023955446e-05,
"loss": 12.4548,
"step": 56
},
{
"epoch": 0.006258921708575821,
"grad_norm": 0.020938068628311157,
"learning_rate": 8.564642241456986e-05,
"loss": 12.452,
"step": 57
},
{
"epoch": 0.0063687273525859225,
"grad_norm": 0.023144040256738663,
"learning_rate": 8.506183921362443e-05,
"loss": 12.4524,
"step": 58
},
{
"epoch": 0.006478532996596025,
"grad_norm": 0.021375242620706558,
"learning_rate": 8.44676704559283e-05,
"loss": 12.4549,
"step": 59
},
{
"epoch": 0.006588338640606127,
"grad_norm": 0.025442024692893028,
"learning_rate": 8.386407858128706e-05,
"loss": 12.4489,
"step": 60
},
{
"epoch": 0.0066981442846162295,
"grad_norm": 0.02413768693804741,
"learning_rate": 8.32512286056924e-05,
"loss": 12.4501,
"step": 61
},
{
"epoch": 0.006807949928626331,
"grad_norm": 0.02187233977019787,
"learning_rate": 8.262928807620843e-05,
"loss": 12.4463,
"step": 62
},
{
"epoch": 0.006917755572636433,
"grad_norm": 0.032653287053108215,
"learning_rate": 8.199842702516583e-05,
"loss": 12.4471,
"step": 63
},
{
"epoch": 0.007027561216646536,
"grad_norm": 0.03500797972083092,
"learning_rate": 8.135881792367686e-05,
"loss": 12.4517,
"step": 64
},
{
"epoch": 0.007137366860656638,
"grad_norm": 0.030853718519210815,
"learning_rate": 8.07106356344834e-05,
"loss": 12.4476,
"step": 65
},
{
"epoch": 0.0072471725046667395,
"grad_norm": 0.03660961240530014,
"learning_rate": 8.005405736415126e-05,
"loss": 12.4443,
"step": 66
},
{
"epoch": 0.007356978148676842,
"grad_norm": 0.045098926872015,
"learning_rate": 7.938926261462366e-05,
"loss": 12.4519,
"step": 67
},
{
"epoch": 0.007466783792686944,
"grad_norm": 0.05310133844614029,
"learning_rate": 7.871643313414718e-05,
"loss": 12.4475,
"step": 68
},
{
"epoch": 0.0075765894366970465,
"grad_norm": 0.06399852782487869,
"learning_rate": 7.803575286758364e-05,
"loss": 12.4399,
"step": 69
},
{
"epoch": 0.007686395080707148,
"grad_norm": 0.04524382948875427,
"learning_rate": 7.734740790612136e-05,
"loss": 12.4508,
"step": 70
},
{
"epoch": 0.00779620072471725,
"grad_norm": 0.0656951367855072,
"learning_rate": 7.66515864363997e-05,
"loss": 12.4413,
"step": 71
},
{
"epoch": 0.007906006368727353,
"grad_norm": 0.09559205174446106,
"learning_rate": 7.594847868906076e-05,
"loss": 12.4411,
"step": 72
},
{
"epoch": 0.008015812012737454,
"grad_norm": 0.1631324142217636,
"learning_rate": 7.52382768867422e-05,
"loss": 12.4419,
"step": 73
},
{
"epoch": 0.008125617656747557,
"grad_norm": 0.08460504561662674,
"learning_rate": 7.452117519152542e-05,
"loss": 12.4445,
"step": 74
},
{
"epoch": 0.008235423300757659,
"grad_norm": 0.14222237467765808,
"learning_rate": 7.379736965185368e-05,
"loss": 12.4401,
"step": 75
},
{
"epoch": 0.00834522894476776,
"grad_norm": 0.13685190677642822,
"learning_rate": 7.30670581489344e-05,
"loss": 12.4354,
"step": 76
},
{
"epoch": 0.008455034588777863,
"grad_norm": 0.1764751672744751,
"learning_rate": 7.233044034264034e-05,
"loss": 12.4395,
"step": 77
},
{
"epoch": 0.008564840232787965,
"grad_norm": 0.2350654900074005,
"learning_rate": 7.158771761692464e-05,
"loss": 12.4403,
"step": 78
},
{
"epoch": 0.008674645876798068,
"grad_norm": 0.21367262303829193,
"learning_rate": 7.083909302476453e-05,
"loss": 12.4411,
"step": 79
},
{
"epoch": 0.00878445152080817,
"grad_norm": 0.6251615881919861,
"learning_rate": 7.008477123264848e-05,
"loss": 12.4137,
"step": 80
},
{
"epoch": 0.008894257164818271,
"grad_norm": 0.17940542101860046,
"learning_rate": 6.932495846462261e-05,
"loss": 12.431,
"step": 81
},
{
"epoch": 0.009004062808828374,
"grad_norm": 0.20948448777198792,
"learning_rate": 6.855986244591104e-05,
"loss": 12.4307,
"step": 82
},
{
"epoch": 0.009113868452838476,
"grad_norm": 0.39900630712509155,
"learning_rate": 6.778969234612584e-05,
"loss": 12.4172,
"step": 83
},
{
"epoch": 0.009223674096848577,
"grad_norm": 0.49331218004226685,
"learning_rate": 6.701465872208216e-05,
"loss": 12.4058,
"step": 84
},
{
"epoch": 0.00933347974085868,
"grad_norm": 0.5143928527832031,
"learning_rate": 6.623497346023418e-05,
"loss": 12.4252,
"step": 85
},
{
"epoch": 0.009443285384868782,
"grad_norm": 0.32338955998420715,
"learning_rate": 6.545084971874738e-05,
"loss": 12.4228,
"step": 86
},
{
"epoch": 0.009553091028878885,
"grad_norm": 0.5905637145042419,
"learning_rate": 6.466250186922325e-05,
"loss": 12.4069,
"step": 87
},
{
"epoch": 0.009662896672888987,
"grad_norm": 0.6392964124679565,
"learning_rate": 6.387014543809223e-05,
"loss": 12.4135,
"step": 88
},
{
"epoch": 0.009772702316899088,
"grad_norm": 0.6248819231987,
"learning_rate": 6.307399704769099e-05,
"loss": 12.4015,
"step": 89
},
{
"epoch": 0.009882507960909191,
"grad_norm": 0.5917252898216248,
"learning_rate": 6.227427435703997e-05,
"loss": 12.4128,
"step": 90
},
{
"epoch": 0.009992313604919293,
"grad_norm": 0.8355460166931152,
"learning_rate": 6.147119600233758e-05,
"loss": 12.3783,
"step": 91
},
{
"epoch": 0.010102119248929394,
"grad_norm": 0.945870041847229,
"learning_rate": 6.066498153718735e-05,
"loss": 12.3706,
"step": 92
},
{
"epoch": 0.010211924892939497,
"grad_norm": 0.8929839730262756,
"learning_rate": 5.985585137257401e-05,
"loss": 12.3695,
"step": 93
},
{
"epoch": 0.010321730536949599,
"grad_norm": 0.9245277047157288,
"learning_rate": 5.90440267166055e-05,
"loss": 12.3645,
"step": 94
},
{
"epoch": 0.010431536180959702,
"grad_norm": 1.0025641918182373,
"learning_rate": 5.8229729514036705e-05,
"loss": 12.3544,
"step": 95
},
{
"epoch": 0.010541341824969803,
"grad_norm": 0.8262727856636047,
"learning_rate": 5.74131823855921e-05,
"loss": 12.3882,
"step": 96
},
{
"epoch": 0.010651147468979905,
"grad_norm": 1.0223095417022705,
"learning_rate": 5.6594608567103456e-05,
"loss": 12.3386,
"step": 97
},
{
"epoch": 0.010760953112990008,
"grad_norm": 0.8433359265327454,
"learning_rate": 5.577423184847932e-05,
"loss": 12.3487,
"step": 98
},
{
"epoch": 0.01087075875700011,
"grad_norm": 1.120093584060669,
"learning_rate": 5.495227651252315e-05,
"loss": 12.3204,
"step": 99
},
{
"epoch": 0.010980564401010211,
"grad_norm": 0.0,
"learning_rate": 5.4128967273616625e-05,
"loss": 0.0,
"step": 100
},
{
"epoch": 0.010980564401010211,
"eval_loss": null,
"eval_runtime": 105.3319,
"eval_samples_per_second": 145.616,
"eval_steps_per_second": 36.409,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 1
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 387594584064.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}