{
"best_metric": 0.7515735626220703,
"best_model_checkpoint": "miner_id_24/checkpoint-400",
"epoch": 0.2536461636017755,
"eval_steps": 50,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0006341154090044388,
"grad_norm": 4.2821364402771,
"learning_rate": 1e-05,
"loss": 5.3646,
"step": 1
},
{
"epoch": 0.0006341154090044388,
"eval_loss": 1.5361766815185547,
"eval_runtime": 296.5915,
"eval_samples_per_second": 8.955,
"eval_steps_per_second": 2.239,
"step": 1
},
{
"epoch": 0.0012682308180088776,
"grad_norm": 4.619045734405518,
"learning_rate": 2e-05,
"loss": 5.7104,
"step": 2
},
{
"epoch": 0.0019023462270133164,
"grad_norm": 6.930375099182129,
"learning_rate": 3e-05,
"loss": 5.8588,
"step": 3
},
{
"epoch": 0.0025364616360177552,
"grad_norm": 3.5030791759490967,
"learning_rate": 4e-05,
"loss": 5.5698,
"step": 4
},
{
"epoch": 0.0031705770450221942,
"grad_norm": 2.9358534812927246,
"learning_rate": 5e-05,
"loss": 5.4895,
"step": 5
},
{
"epoch": 0.003804692454026633,
"grad_norm": 3.55356502532959,
"learning_rate": 6e-05,
"loss": 4.971,
"step": 6
},
{
"epoch": 0.004438807863031071,
"grad_norm": 3.4778835773468018,
"learning_rate": 7e-05,
"loss": 4.8542,
"step": 7
},
{
"epoch": 0.0050729232720355105,
"grad_norm": 2.8969311714172363,
"learning_rate": 8e-05,
"loss": 4.3794,
"step": 8
},
{
"epoch": 0.0057070386810399495,
"grad_norm": 2.9659183025360107,
"learning_rate": 9e-05,
"loss": 4.1775,
"step": 9
},
{
"epoch": 0.0063411540900443885,
"grad_norm": 2.9083251953125,
"learning_rate": 0.0001,
"loss": 4.0787,
"step": 10
},
{
"epoch": 0.006975269499048827,
"grad_norm": 2.144608736038208,
"learning_rate": 9.99983777858264e-05,
"loss": 3.96,
"step": 11
},
{
"epoch": 0.007609384908053266,
"grad_norm": 2.0248992443084717,
"learning_rate": 9.999351124856874e-05,
"loss": 3.7824,
"step": 12
},
{
"epoch": 0.008243500317057704,
"grad_norm": 3.3520021438598633,
"learning_rate": 9.998540070400966e-05,
"loss": 3.6636,
"step": 13
},
{
"epoch": 0.008877615726062143,
"grad_norm": 1.616699457168579,
"learning_rate": 9.997404667843075e-05,
"loss": 3.8141,
"step": 14
},
{
"epoch": 0.009511731135066582,
"grad_norm": 1.5765314102172852,
"learning_rate": 9.995944990857849e-05,
"loss": 3.8561,
"step": 15
},
{
"epoch": 0.010145846544071021,
"grad_norm": 1.6568735837936401,
"learning_rate": 9.994161134161634e-05,
"loss": 3.796,
"step": 16
},
{
"epoch": 0.01077996195307546,
"grad_norm": 1.4620311260223389,
"learning_rate": 9.992053213506334e-05,
"loss": 3.6469,
"step": 17
},
{
"epoch": 0.011414077362079899,
"grad_norm": 1.6220909357070923,
"learning_rate": 9.989621365671902e-05,
"loss": 3.712,
"step": 18
},
{
"epoch": 0.012048192771084338,
"grad_norm": 1.3328747749328613,
"learning_rate": 9.986865748457457e-05,
"loss": 3.6251,
"step": 19
},
{
"epoch": 0.012682308180088777,
"grad_norm": 1.767844319343567,
"learning_rate": 9.983786540671051e-05,
"loss": 3.6623,
"step": 20
},
{
"epoch": 0.013316423589093214,
"grad_norm": 1.543691873550415,
"learning_rate": 9.980383942118066e-05,
"loss": 3.7249,
"step": 21
},
{
"epoch": 0.013950538998097653,
"grad_norm": 1.1989096403121948,
"learning_rate": 9.976658173588244e-05,
"loss": 3.5418,
"step": 22
},
{
"epoch": 0.014584654407102092,
"grad_norm": 1.437749981880188,
"learning_rate": 9.972609476841367e-05,
"loss": 3.5405,
"step": 23
},
{
"epoch": 0.015218769816106531,
"grad_norm": 1.3658512830734253,
"learning_rate": 9.968238114591566e-05,
"loss": 3.4703,
"step": 24
},
{
"epoch": 0.01585288522511097,
"grad_norm": 1.3563766479492188,
"learning_rate": 9.96354437049027e-05,
"loss": 3.6742,
"step": 25
},
{
"epoch": 0.016487000634115408,
"grad_norm": 1.4246914386749268,
"learning_rate": 9.95852854910781e-05,
"loss": 3.5858,
"step": 26
},
{
"epoch": 0.017121116043119847,
"grad_norm": 1.4282399415969849,
"learning_rate": 9.953190975913647e-05,
"loss": 3.4487,
"step": 27
},
{
"epoch": 0.017755231452124286,
"grad_norm": 1.4253621101379395,
"learning_rate": 9.947531997255256e-05,
"loss": 3.415,
"step": 28
},
{
"epoch": 0.018389346861128725,
"grad_norm": 1.4151033163070679,
"learning_rate": 9.941551980335652e-05,
"loss": 3.5219,
"step": 29
},
{
"epoch": 0.019023462270133164,
"grad_norm": 1.5081020593643188,
"learning_rate": 9.935251313189564e-05,
"loss": 3.5514,
"step": 30
},
{
"epoch": 0.019657577679137603,
"grad_norm": 1.3580255508422852,
"learning_rate": 9.928630404658255e-05,
"loss": 3.4421,
"step": 31
},
{
"epoch": 0.020291693088142042,
"grad_norm": 1.3717368841171265,
"learning_rate": 9.921689684362989e-05,
"loss": 3.3642,
"step": 32
},
{
"epoch": 0.02092580849714648,
"grad_norm": 1.3913062810897827,
"learning_rate": 9.914429602677162e-05,
"loss": 3.4983,
"step": 33
},
{
"epoch": 0.02155992390615092,
"grad_norm": 1.3363810777664185,
"learning_rate": 9.906850630697068e-05,
"loss": 3.3011,
"step": 34
},
{
"epoch": 0.02219403931515536,
"grad_norm": 1.3653063774108887,
"learning_rate": 9.898953260211338e-05,
"loss": 3.2758,
"step": 35
},
{
"epoch": 0.022828154724159798,
"grad_norm": 1.2513288259506226,
"learning_rate": 9.890738003669029e-05,
"loss": 3.3022,
"step": 36
},
{
"epoch": 0.023462270133164237,
"grad_norm": 1.301414966583252,
"learning_rate": 9.882205394146361e-05,
"loss": 3.3013,
"step": 37
},
{
"epoch": 0.024096385542168676,
"grad_norm": 1.2664551734924316,
"learning_rate": 9.87335598531214e-05,
"loss": 3.5038,
"step": 38
},
{
"epoch": 0.024730500951173115,
"grad_norm": 1.233097791671753,
"learning_rate": 9.864190351391822e-05,
"loss": 3.2793,
"step": 39
},
{
"epoch": 0.025364616360177554,
"grad_norm": 1.2509174346923828,
"learning_rate": 9.85470908713026e-05,
"loss": 3.3181,
"step": 40
},
{
"epoch": 0.02599873176918199,
"grad_norm": 1.3066641092300415,
"learning_rate": 9.844912807753104e-05,
"loss": 3.2021,
"step": 41
},
{
"epoch": 0.02663284717818643,
"grad_norm": 1.2607433795928955,
"learning_rate": 9.834802148926882e-05,
"loss": 3.0709,
"step": 42
},
{
"epoch": 0.027266962587190868,
"grad_norm": 1.3766813278198242,
"learning_rate": 9.824377766717759e-05,
"loss": 3.175,
"step": 43
},
{
"epoch": 0.027901077996195307,
"grad_norm": 1.3962171077728271,
"learning_rate": 9.813640337548954e-05,
"loss": 3.2248,
"step": 44
},
{
"epoch": 0.028535193405199746,
"grad_norm": 1.5123058557510376,
"learning_rate": 9.802590558156862e-05,
"loss": 3.0562,
"step": 45
},
{
"epoch": 0.029169308814204185,
"grad_norm": 1.3668205738067627,
"learning_rate": 9.791229145545831e-05,
"loss": 2.8726,
"step": 46
},
{
"epoch": 0.029803424223208624,
"grad_norm": 1.6242258548736572,
"learning_rate": 9.779556836941645e-05,
"loss": 2.9806,
"step": 47
},
{
"epoch": 0.030437539632213063,
"grad_norm": 1.840408205986023,
"learning_rate": 9.767574389743682e-05,
"loss": 2.766,
"step": 48
},
{
"epoch": 0.031071655041217502,
"grad_norm": 1.5403425693511963,
"learning_rate": 9.755282581475769e-05,
"loss": 2.825,
"step": 49
},
{
"epoch": 0.03170577045022194,
"grad_norm": 1.665044903755188,
"learning_rate": 9.742682209735727e-05,
"loss": 2.9077,
"step": 50
},
{
"epoch": 0.03170577045022194,
"eval_loss": 0.8847057223320007,
"eval_runtime": 297.6178,
"eval_samples_per_second": 8.924,
"eval_steps_per_second": 2.231,
"step": 50
},
{
"epoch": 0.032339885859226376,
"grad_norm": 3.129869222640991,
"learning_rate": 9.729774092143627e-05,
"loss": 3.7664,
"step": 51
},
{
"epoch": 0.032974001268230815,
"grad_norm": 2.0350043773651123,
"learning_rate": 9.716559066288715e-05,
"loss": 3.6251,
"step": 52
},
{
"epoch": 0.033608116677235254,
"grad_norm": 1.3122864961624146,
"learning_rate": 9.703037989675087e-05,
"loss": 3.5988,
"step": 53
},
{
"epoch": 0.03424223208623969,
"grad_norm": 1.3547205924987793,
"learning_rate": 9.689211739666023e-05,
"loss": 3.4852,
"step": 54
},
{
"epoch": 0.03487634749524413,
"grad_norm": 1.131834864616394,
"learning_rate": 9.675081213427076e-05,
"loss": 3.4682,
"step": 55
},
{
"epoch": 0.03551046290424857,
"grad_norm": 1.118656039237976,
"learning_rate": 9.66064732786784e-05,
"loss": 3.4695,
"step": 56
},
{
"epoch": 0.03614457831325301,
"grad_norm": 1.095979928970337,
"learning_rate": 9.645911019582467e-05,
"loss": 3.3906,
"step": 57
},
{
"epoch": 0.03677869372225745,
"grad_norm": 1.330224633216858,
"learning_rate": 9.630873244788883e-05,
"loss": 3.5858,
"step": 58
},
{
"epoch": 0.03741280913126189,
"grad_norm": 1.1612094640731812,
"learning_rate": 9.615534979266745e-05,
"loss": 3.5082,
"step": 59
},
{
"epoch": 0.03804692454026633,
"grad_norm": 1.1439632177352905,
"learning_rate": 9.599897218294122e-05,
"loss": 3.4478,
"step": 60
},
{
"epoch": 0.03868103994927077,
"grad_norm": 1.480470895767212,
"learning_rate": 9.583960976582913e-05,
"loss": 3.3747,
"step": 61
},
{
"epoch": 0.039315155358275206,
"grad_norm": 1.1436293125152588,
"learning_rate": 9.567727288213005e-05,
"loss": 3.3507,
"step": 62
},
{
"epoch": 0.039949270767279645,
"grad_norm": 1.1501903533935547,
"learning_rate": 9.551197206565173e-05,
"loss": 3.3903,
"step": 63
},
{
"epoch": 0.040583386176284084,
"grad_norm": 1.1135419607162476,
"learning_rate": 9.534371804252728e-05,
"loss": 3.2974,
"step": 64
},
{
"epoch": 0.04121750158528852,
"grad_norm": 1.1008535623550415,
"learning_rate": 9.517252173051911e-05,
"loss": 3.3343,
"step": 65
},
{
"epoch": 0.04185161699429296,
"grad_norm": 1.1229270696640015,
"learning_rate": 9.49983942383106e-05,
"loss": 3.3202,
"step": 66
},
{
"epoch": 0.0424857324032974,
"grad_norm": 1.129658579826355,
"learning_rate": 9.482134686478519e-05,
"loss": 3.4031,
"step": 67
},
{
"epoch": 0.04311984781230184,
"grad_norm": 1.1309914588928223,
"learning_rate": 9.464139109829321e-05,
"loss": 3.3727,
"step": 68
},
{
"epoch": 0.04375396322130628,
"grad_norm": 1.1195210218429565,
"learning_rate": 9.445853861590647e-05,
"loss": 3.3391,
"step": 69
},
{
"epoch": 0.04438807863031072,
"grad_norm": 1.14125657081604,
"learning_rate": 9.42728012826605e-05,
"loss": 3.2907,
"step": 70
},
{
"epoch": 0.04502219403931516,
"grad_norm": 1.1160163879394531,
"learning_rate": 9.408419115078471e-05,
"loss": 3.2616,
"step": 71
},
{
"epoch": 0.045656309448319596,
"grad_norm": 1.1449788808822632,
"learning_rate": 9.389272045892024e-05,
"loss": 3.3349,
"step": 72
},
{
"epoch": 0.046290424857324035,
"grad_norm": 1.096792221069336,
"learning_rate": 9.36984016313259e-05,
"loss": 3.2215,
"step": 73
},
{
"epoch": 0.046924540266328474,
"grad_norm": 1.1410120725631714,
"learning_rate": 9.350124727707197e-05,
"loss": 3.3399,
"step": 74
},
{
"epoch": 0.04755865567533291,
"grad_norm": 1.1690126657485962,
"learning_rate": 9.330127018922194e-05,
"loss": 3.354,
"step": 75
},
{
"epoch": 0.04819277108433735,
"grad_norm": 1.2746645212173462,
"learning_rate": 9.309848334400246e-05,
"loss": 3.3572,
"step": 76
},
{
"epoch": 0.04882688649334179,
"grad_norm": 1.1271394491195679,
"learning_rate": 9.289289989996133e-05,
"loss": 3.3658,
"step": 77
},
{
"epoch": 0.04946100190234623,
"grad_norm": 1.2954553365707397,
"learning_rate": 9.268453319711363e-05,
"loss": 3.2274,
"step": 78
},
{
"epoch": 0.05009511731135067,
"grad_norm": 1.1216834783554077,
"learning_rate": 9.247339675607605e-05,
"loss": 3.267,
"step": 79
},
{
"epoch": 0.05072923272035511,
"grad_norm": 1.1674860715866089,
"learning_rate": 9.225950427718975e-05,
"loss": 3.3223,
"step": 80
},
{
"epoch": 0.05136334812935954,
"grad_norm": 1.0981123447418213,
"learning_rate": 9.204286963963111e-05,
"loss": 3.1553,
"step": 81
},
{
"epoch": 0.05199746353836398,
"grad_norm": 1.2098571062088013,
"learning_rate": 9.182350690051133e-05,
"loss": 3.287,
"step": 82
},
{
"epoch": 0.05263157894736842,
"grad_norm": 1.128381371498108,
"learning_rate": 9.160143029396422e-05,
"loss": 3.26,
"step": 83
},
{
"epoch": 0.05326569435637286,
"grad_norm": 1.2328784465789795,
"learning_rate": 9.13766542302225e-05,
"loss": 3.3351,
"step": 84
},
{
"epoch": 0.053899809765377296,
"grad_norm": 1.1560747623443604,
"learning_rate": 9.114919329468282e-05,
"loss": 3.268,
"step": 85
},
{
"epoch": 0.054533925174381735,
"grad_norm": 1.191244125366211,
"learning_rate": 9.091906224695935e-05,
"loss": 3.1744,
"step": 86
},
{
"epoch": 0.055168040583386174,
"grad_norm": 1.1172856092453003,
"learning_rate": 9.068627601992598e-05,
"loss": 3.1647,
"step": 87
},
{
"epoch": 0.05580215599239061,
"grad_norm": 1.1811506748199463,
"learning_rate": 9.045084971874738e-05,
"loss": 3.1507,
"step": 88
},
{
"epoch": 0.05643627140139505,
"grad_norm": 1.2562776803970337,
"learning_rate": 9.021279861989885e-05,
"loss": 3.1243,
"step": 89
},
{
"epoch": 0.05707038681039949,
"grad_norm": 1.2073371410369873,
"learning_rate": 8.997213817017507e-05,
"loss": 3.2071,
"step": 90
},
{
"epoch": 0.05770450221940393,
"grad_norm": 1.179689884185791,
"learning_rate": 8.972888398568772e-05,
"loss": 3.1118,
"step": 91
},
{
"epoch": 0.05833861762840837,
"grad_norm": 1.1589559316635132,
"learning_rate": 8.948305185085225e-05,
"loss": 3.0971,
"step": 92
},
{
"epoch": 0.05897273303741281,
"grad_norm": 1.5334442853927612,
"learning_rate": 8.92346577173636e-05,
"loss": 2.9583,
"step": 93
},
{
"epoch": 0.05960684844641725,
"grad_norm": 1.1919718980789185,
"learning_rate": 8.898371770316111e-05,
"loss": 2.9499,
"step": 94
},
{
"epoch": 0.060240963855421686,
"grad_norm": 1.2710996866226196,
"learning_rate": 8.873024809138272e-05,
"loss": 3.0539,
"step": 95
},
{
"epoch": 0.060875079264426125,
"grad_norm": 1.2094289064407349,
"learning_rate": 8.847426532930831e-05,
"loss": 2.9798,
"step": 96
},
{
"epoch": 0.061509194673430564,
"grad_norm": 1.2845017910003662,
"learning_rate": 8.821578602729242e-05,
"loss": 2.7427,
"step": 97
},
{
"epoch": 0.062143310082435003,
"grad_norm": 1.26568603515625,
"learning_rate": 8.795482695768658e-05,
"loss": 2.7497,
"step": 98
},
{
"epoch": 0.06277742549143944,
"grad_norm": 1.4030022621154785,
"learning_rate": 8.769140505375085e-05,
"loss": 2.6945,
"step": 99
},
{
"epoch": 0.06341154090044387,
"grad_norm": 1.5023269653320312,
"learning_rate": 8.742553740855506e-05,
"loss": 2.8778,
"step": 100
},
{
"epoch": 0.06341154090044387,
"eval_loss": 0.8155827522277832,
"eval_runtime": 297.7632,
"eval_samples_per_second": 8.92,
"eval_steps_per_second": 2.23,
"step": 100
},
{
"epoch": 0.06404565630944832,
"grad_norm": 1.5373103618621826,
"learning_rate": 8.715724127386972e-05,
"loss": 3.477,
"step": 101
},
{
"epoch": 0.06467977171845275,
"grad_norm": 1.278006911277771,
"learning_rate": 8.688653405904652e-05,
"loss": 3.476,
"step": 102
},
{
"epoch": 0.0653138871274572,
"grad_norm": 1.0467379093170166,
"learning_rate": 8.661343332988869e-05,
"loss": 3.3155,
"step": 103
},
{
"epoch": 0.06594800253646163,
"grad_norm": 1.3283839225769043,
"learning_rate": 8.633795680751116e-05,
"loss": 3.4751,
"step": 104
},
{
"epoch": 0.06658211794546608,
"grad_norm": 1.1208345890045166,
"learning_rate": 8.606012236719073e-05,
"loss": 3.228,
"step": 105
},
{
"epoch": 0.06721623335447051,
"grad_norm": 1.0446490049362183,
"learning_rate": 8.577994803720606e-05,
"loss": 3.3327,
"step": 106
},
{
"epoch": 0.06785034876347495,
"grad_norm": 1.0248538255691528,
"learning_rate": 8.549745199766792e-05,
"loss": 3.348,
"step": 107
},
{
"epoch": 0.06848446417247939,
"grad_norm": 1.1333471536636353,
"learning_rate": 8.521265257933948e-05,
"loss": 3.3869,
"step": 108
},
{
"epoch": 0.06911857958148383,
"grad_norm": 1.1693687438964844,
"learning_rate": 8.492556826244687e-05,
"loss": 3.326,
"step": 109
},
{
"epoch": 0.06975269499048826,
"grad_norm": 1.0264679193496704,
"learning_rate": 8.463621767547998e-05,
"loss": 3.2675,
"step": 110
},
{
"epoch": 0.07038681039949271,
"grad_norm": 1.103921890258789,
"learning_rate": 8.434461959398376e-05,
"loss": 3.354,
"step": 111
},
{
"epoch": 0.07102092580849714,
"grad_norm": 1.0422513484954834,
"learning_rate": 8.405079293933986e-05,
"loss": 3.2956,
"step": 112
},
{
"epoch": 0.07165504121750159,
"grad_norm": 1.0689914226531982,
"learning_rate": 8.375475677753881e-05,
"loss": 3.3304,
"step": 113
},
{
"epoch": 0.07228915662650602,
"grad_norm": 1.0220685005187988,
"learning_rate": 8.345653031794292e-05,
"loss": 3.3593,
"step": 114
},
{
"epoch": 0.07292327203551047,
"grad_norm": 1.1142345666885376,
"learning_rate": 8.315613291203976e-05,
"loss": 3.295,
"step": 115
},
{
"epoch": 0.0735573874445149,
"grad_norm": 1.048160433769226,
"learning_rate": 8.285358405218655e-05,
"loss": 3.2746,
"step": 116
},
{
"epoch": 0.07419150285351934,
"grad_norm": 1.0649545192718506,
"learning_rate": 8.25489033703452e-05,
"loss": 3.3655,
"step": 117
},
{
"epoch": 0.07482561826252378,
"grad_norm": 1.0890159606933594,
"learning_rate": 8.224211063680853e-05,
"loss": 3.3308,
"step": 118
},
{
"epoch": 0.07545973367152822,
"grad_norm": 1.0687116384506226,
"learning_rate": 8.19332257589174e-05,
"loss": 3.1868,
"step": 119
},
{
"epoch": 0.07609384908053266,
"grad_norm": 1.1016745567321777,
"learning_rate": 8.162226877976887e-05,
"loss": 3.2142,
"step": 120
},
{
"epoch": 0.0767279644895371,
"grad_norm": 1.051974892616272,
"learning_rate": 8.130925987691569e-05,
"loss": 3.3221,
"step": 121
},
{
"epoch": 0.07736207989854153,
"grad_norm": 1.0410847663879395,
"learning_rate": 8.099421936105702e-05,
"loss": 3.1397,
"step": 122
},
{
"epoch": 0.07799619530754598,
"grad_norm": 1.072290301322937,
"learning_rate": 8.067716767472045e-05,
"loss": 3.3434,
"step": 123
},
{
"epoch": 0.07863031071655041,
"grad_norm": 1.0379129648208618,
"learning_rate": 8.035812539093557e-05,
"loss": 3.25,
"step": 124
},
{
"epoch": 0.07926442612555486,
"grad_norm": 1.0454907417297363,
"learning_rate": 8.003711321189895e-05,
"loss": 3.2054,
"step": 125
},
{
"epoch": 0.07989854153455929,
"grad_norm": 1.0810647010803223,
"learning_rate": 7.971415196763088e-05,
"loss": 3.1932,
"step": 126
},
{
"epoch": 0.08053265694356374,
"grad_norm": 1.0763037204742432,
"learning_rate": 7.938926261462366e-05,
"loss": 3.2082,
"step": 127
},
{
"epoch": 0.08116677235256817,
"grad_norm": 1.1298437118530273,
"learning_rate": 7.906246623448183e-05,
"loss": 3.1894,
"step": 128
},
{
"epoch": 0.0818008877615726,
"grad_norm": 1.0926628112792969,
"learning_rate": 7.873378403255419e-05,
"loss": 3.1941,
"step": 129
},
{
"epoch": 0.08243500317057705,
"grad_norm": 1.0685158967971802,
"learning_rate": 7.840323733655778e-05,
"loss": 3.169,
"step": 130
},
{
"epoch": 0.08306911857958148,
"grad_norm": 1.0912131071090698,
"learning_rate": 7.807084759519405e-05,
"loss": 3.1239,
"step": 131
},
{
"epoch": 0.08370323398858592,
"grad_norm": 1.1438196897506714,
"learning_rate": 7.773663637675694e-05,
"loss": 3.273,
"step": 132
},
{
"epoch": 0.08433734939759036,
"grad_norm": 1.1781691312789917,
"learning_rate": 7.740062536773352e-05,
"loss": 3.3073,
"step": 133
},
{
"epoch": 0.0849714648065948,
"grad_norm": 1.149361252784729,
"learning_rate": 7.706283637139658e-05,
"loss": 3.2185,
"step": 134
},
{
"epoch": 0.08560558021559923,
"grad_norm": 1.1654759645462036,
"learning_rate": 7.672329130639005e-05,
"loss": 3.1729,
"step": 135
},
{
"epoch": 0.08623969562460368,
"grad_norm": 1.3615312576293945,
"learning_rate": 7.638201220530665e-05,
"loss": 3.1193,
"step": 136
},
{
"epoch": 0.08687381103360811,
"grad_norm": 1.0837875604629517,
"learning_rate": 7.603902121325813e-05,
"loss": 3.1017,
"step": 137
},
{
"epoch": 0.08750792644261256,
"grad_norm": 1.105128526687622,
"learning_rate": 7.569434058643844e-05,
"loss": 3.1515,
"step": 138
},
{
"epoch": 0.08814204185161699,
"grad_norm": 1.0796869993209839,
"learning_rate": 7.534799269067953e-05,
"loss": 3.1271,
"step": 139
},
{
"epoch": 0.08877615726062144,
"grad_norm": 1.2005115747451782,
"learning_rate": 7.500000000000001e-05,
"loss": 3.131,
"step": 140
},
{
"epoch": 0.08941027266962587,
"grad_norm": 1.122515082359314,
"learning_rate": 7.465038509514688e-05,
"loss": 3.1134,
"step": 141
},
{
"epoch": 0.09004438807863031,
"grad_norm": 1.1972311735153198,
"learning_rate": 7.42991706621303e-05,
"loss": 3.0486,
"step": 142
},
{
"epoch": 0.09067850348763475,
"grad_norm": 1.140086054801941,
"learning_rate": 7.394637949075154e-05,
"loss": 2.9324,
"step": 143
},
{
"epoch": 0.09131261889663919,
"grad_norm": 1.3415659666061401,
"learning_rate": 7.35920344731241e-05,
"loss": 3.0243,
"step": 144
},
{
"epoch": 0.09194673430564362,
"grad_norm": 1.196213722229004,
"learning_rate": 7.323615860218843e-05,
"loss": 3.0405,
"step": 145
},
{
"epoch": 0.09258084971464807,
"grad_norm": 1.3614180088043213,
"learning_rate": 7.287877497021978e-05,
"loss": 2.8349,
"step": 146
},
{
"epoch": 0.0932149651236525,
"grad_norm": 1.288540244102478,
"learning_rate": 7.251990676732984e-05,
"loss": 2.7361,
"step": 147
},
{
"epoch": 0.09384908053265695,
"grad_norm": 1.2313977479934692,
"learning_rate": 7.215957727996207e-05,
"loss": 2.8024,
"step": 148
},
{
"epoch": 0.09448319594166138,
"grad_norm": 1.2483394145965576,
"learning_rate": 7.179780988938051e-05,
"loss": 2.5696,
"step": 149
},
{
"epoch": 0.09511731135066583,
"grad_norm": 1.4693834781646729,
"learning_rate": 7.143462807015271e-05,
"loss": 2.6114,
"step": 150
},
{
"epoch": 0.09511731135066583,
"eval_loss": 0.8000059723854065,
"eval_runtime": 299.1522,
"eval_samples_per_second": 8.878,
"eval_steps_per_second": 2.22,
"step": 150
},
{
"epoch": 0.09575142675967026,
"grad_norm": 1.7044169902801514,
"learning_rate": 7.107005538862646e-05,
"loss": 4.1348,
"step": 151
},
{
"epoch": 0.0963855421686747,
"grad_norm": 1.5047065019607544,
"learning_rate": 7.07041155014006e-05,
"loss": 3.3954,
"step": 152
},
{
"epoch": 0.09701965757767914,
"grad_norm": 1.2422665357589722,
"learning_rate": 7.033683215379002e-05,
"loss": 3.3153,
"step": 153
},
{
"epoch": 0.09765377298668358,
"grad_norm": 5.329709053039551,
"learning_rate": 6.996822917828477e-05,
"loss": 3.3014,
"step": 154
},
{
"epoch": 0.09828788839568801,
"grad_norm": 0.9941044449806213,
"learning_rate": 6.959833049300377e-05,
"loss": 3.1952,
"step": 155
},
{
"epoch": 0.09892200380469246,
"grad_norm": 1.0377179384231567,
"learning_rate": 6.922716010014255e-05,
"loss": 3.3715,
"step": 156
},
{
"epoch": 0.09955611921369689,
"grad_norm": 1.0825616121292114,
"learning_rate": 6.885474208441603e-05,
"loss": 3.238,
"step": 157
},
{
"epoch": 0.10019023462270134,
"grad_norm": 1.0486564636230469,
"learning_rate": 6.848110061149556e-05,
"loss": 3.2671,
"step": 158
},
{
"epoch": 0.10082435003170577,
"grad_norm": 1.0950785875320435,
"learning_rate": 6.810625992644085e-05,
"loss": 3.387,
"step": 159
},
{
"epoch": 0.10145846544071022,
"grad_norm": 1.0026979446411133,
"learning_rate": 6.773024435212678e-05,
"loss": 3.2341,
"step": 160
},
{
"epoch": 0.10209258084971465,
"grad_norm": 1.0109604597091675,
"learning_rate": 6.735307828766515e-05,
"loss": 3.1852,
"step": 161
},
{
"epoch": 0.10272669625871908,
"grad_norm": 1.0047856569290161,
"learning_rate": 6.697478620682137e-05,
"loss": 3.316,
"step": 162
},
{
"epoch": 0.10336081166772353,
"grad_norm": 1.0846145153045654,
"learning_rate": 6.659539265642643e-05,
"loss": 3.2932,
"step": 163
},
{
"epoch": 0.10399492707672796,
"grad_norm": 1.1783442497253418,
"learning_rate": 6.621492225478414e-05,
"loss": 3.2566,
"step": 164
},
{
"epoch": 0.1046290424857324,
"grad_norm": 1.1924242973327637,
"learning_rate": 6.583339969007363e-05,
"loss": 3.2032,
"step": 165
},
{
"epoch": 0.10526315789473684,
"grad_norm": 1.0744683742523193,
"learning_rate": 6.545084971874738e-05,
"loss": 3.2983,
"step": 166
},
{
"epoch": 0.10589727330374128,
"grad_norm": 1.0177804231643677,
"learning_rate": 6.506729716392481e-05,
"loss": 3.1444,
"step": 167
},
{
"epoch": 0.10653138871274571,
"grad_norm": 1.000748872756958,
"learning_rate": 6.468276691378155e-05,
"loss": 3.1848,
"step": 168
},
{
"epoch": 0.10716550412175016,
"grad_norm": 0.9723776578903198,
"learning_rate": 6.429728391993446e-05,
"loss": 3.0663,
"step": 169
},
{
"epoch": 0.10779961953075459,
"grad_norm": 1.0768924951553345,
"learning_rate": 6.391087319582264e-05,
"loss": 3.4086,
"step": 170
},
{
"epoch": 0.10843373493975904,
"grad_norm": 1.0122095346450806,
"learning_rate": 6.35235598150842e-05,
"loss": 3.1676,
"step": 171
},
{
"epoch": 0.10906785034876347,
"grad_norm": 0.9996663331985474,
"learning_rate": 6.313536890992935e-05,
"loss": 3.226,
"step": 172
},
{
"epoch": 0.10970196575776792,
"grad_norm": 1.062509298324585,
"learning_rate": 6.274632566950967e-05,
"loss": 3.2257,
"step": 173
},
{
"epoch": 0.11033608116677235,
"grad_norm": 1.0289890766143799,
"learning_rate": 6.235645533828349e-05,
"loss": 3.2668,
"step": 174
},
{
"epoch": 0.1109701965757768,
"grad_norm": 1.0083286762237549,
"learning_rate": 6.19657832143779e-05,
"loss": 3.1725,
"step": 175
},
{
"epoch": 0.11160431198478123,
"grad_norm": 1.052514672279358,
"learning_rate": 6.157433464794716e-05,
"loss": 3.2222,
"step": 176
},
{
"epoch": 0.11223842739378567,
"grad_norm": 1.099542260169983,
"learning_rate": 6.118213503952779e-05,
"loss": 3.2265,
"step": 177
},
{
"epoch": 0.1128725428027901,
"grad_norm": 1.0204391479492188,
"learning_rate": 6.078920983839031e-05,
"loss": 3.221,
"step": 178
},
{
"epoch": 0.11350665821179455,
"grad_norm": 1.131372094154358,
"learning_rate": 6.0395584540887963e-05,
"loss": 3.1018,
"step": 179
},
{
"epoch": 0.11414077362079898,
"grad_norm": 1.0413641929626465,
"learning_rate": 6.0001284688802226e-05,
"loss": 3.1977,
"step": 180
},
{
"epoch": 0.11477488902980343,
"grad_norm": 1.0400148630142212,
"learning_rate": 5.960633586768543e-05,
"loss": 3.2413,
"step": 181
},
{
"epoch": 0.11540900443880786,
"grad_norm": 1.1186673641204834,
"learning_rate": 5.921076370520058e-05,
"loss": 3.1271,
"step": 182
},
{
"epoch": 0.1160431198478123,
"grad_norm": 5.378909587860107,
"learning_rate": 5.8814593869458455e-05,
"loss": 3.017,
"step": 183
},
{
"epoch": 0.11667723525681674,
"grad_norm": 1.0467098951339722,
"learning_rate": 5.841785206735192e-05,
"loss": 3.1646,
"step": 184
},
{
"epoch": 0.11731135066582118,
"grad_norm": 1.2054715156555176,
"learning_rate": 5.8020564042888015e-05,
"loss": 3.1296,
"step": 185
},
{
"epoch": 0.11794546607482562,
"grad_norm": 1.1292742490768433,
"learning_rate": 5.762275557551727e-05,
"loss": 3.0876,
"step": 186
},
{
"epoch": 0.11857958148383006,
"grad_norm": 1.0788121223449707,
"learning_rate": 5.7224452478461064e-05,
"loss": 3.0394,
"step": 187
},
{
"epoch": 0.1192136968928345,
"grad_norm": 1.117724895477295,
"learning_rate": 5.682568059703659e-05,
"loss": 3.2083,
"step": 188
},
{
"epoch": 0.11984781230183894,
"grad_norm": 1.1697856187820435,
"learning_rate": 5.642646580697973e-05,
"loss": 3.1939,
"step": 189
},
{
"epoch": 0.12048192771084337,
"grad_norm": 1.1887933015823364,
"learning_rate": 5.602683401276615e-05,
"loss": 3.022,
"step": 190
},
{
"epoch": 0.12111604311984782,
"grad_norm": 1.121009111404419,
"learning_rate": 5.562681114593028e-05,
"loss": 2.9906,
"step": 191
},
{
"epoch": 0.12175015852885225,
"grad_norm": 1.1305354833602905,
"learning_rate": 5.522642316338268e-05,
"loss": 3.0668,
"step": 192
},
{
"epoch": 0.1223842739378567,
"grad_norm": 1.2224658727645874,
"learning_rate": 5.482569604572576e-05,
"loss": 3.1373,
"step": 193
},
{
"epoch": 0.12301838934686113,
"grad_norm": 1.1487756967544556,
"learning_rate": 5.442465579556793e-05,
"loss": 2.9432,
"step": 194
},
{
"epoch": 0.12365250475586556,
"grad_norm": 1.124640703201294,
"learning_rate": 5.402332843583631e-05,
"loss": 2.8273,
"step": 195
},
{
"epoch": 0.12428662016487001,
"grad_norm": 1.1626677513122559,
"learning_rate": 5.3621740008088126e-05,
"loss": 2.7136,
"step": 196
},
{
"epoch": 0.12492073557387444,
"grad_norm": 1.2303059101104736,
"learning_rate": 5.321991657082097e-05,
"loss": 2.5001,
"step": 197
},
{
"epoch": 0.12555485098287889,
"grad_norm": 1.3254761695861816,
"learning_rate": 5.281788419778187e-05,
"loss": 2.637,
"step": 198
},
{
"epoch": 0.12618896639188332,
"grad_norm": 1.3234314918518066,
"learning_rate": 5.2415668976275355e-05,
"loss": 2.5926,
"step": 199
},
{
"epoch": 0.12682308180088775,
"grad_norm": 1.525715708732605,
"learning_rate": 5.201329700547076e-05,
"loss": 2.5566,
"step": 200
},
{
"epoch": 0.12682308180088775,
"eval_loss": 0.7819186449050903,
"eval_runtime": 298.2576,
"eval_samples_per_second": 8.905,
"eval_steps_per_second": 2.226,
"step": 200
},
{
"epoch": 0.1274571972098922,
"grad_norm": 1.2543182373046875,
"learning_rate": 5.161079439470866e-05,
"loss": 3.6287,
"step": 201
},
{
"epoch": 0.12809131261889664,
"grad_norm": 1.2177551984786987,
"learning_rate": 5.1208187261806615e-05,
"loss": 3.3176,
"step": 202
},
{
"epoch": 0.12872542802790107,
"grad_norm": 1.089298129081726,
"learning_rate": 5.080550173136457e-05,
"loss": 3.3302,
"step": 203
},
{
"epoch": 0.1293595434369055,
"grad_norm": 0.9786179065704346,
"learning_rate": 5.0402763933069496e-05,
"loss": 3.2146,
"step": 204
},
{
"epoch": 0.12999365884590997,
"grad_norm": 0.9849365949630737,
"learning_rate": 5e-05,
"loss": 3.2046,
"step": 205
},
{
"epoch": 0.1306277742549144,
"grad_norm": 0.9649850130081177,
"learning_rate": 4.9597236066930516e-05,
"loss": 3.3047,
"step": 206
},
{
"epoch": 0.13126188966391883,
"grad_norm": 0.9902065992355347,
"learning_rate": 4.919449826863544e-05,
"loss": 3.2021,
"step": 207
},
{
"epoch": 0.13189600507292326,
"grad_norm": 0.9828966856002808,
"learning_rate": 4.87918127381934e-05,
"loss": 3.2409,
"step": 208
},
{
"epoch": 0.13253012048192772,
"grad_norm": 1.0174707174301147,
"learning_rate": 4.8389205605291365e-05,
"loss": 3.3236,
"step": 209
},
{
"epoch": 0.13316423589093215,
"grad_norm": 0.9697160124778748,
"learning_rate": 4.798670299452926e-05,
"loss": 3.2682,
"step": 210
},
{
"epoch": 0.13379835129993659,
"grad_norm": 1.0232049226760864,
"learning_rate": 4.758433102372466e-05,
"loss": 3.2173,
"step": 211
},
{
"epoch": 0.13443246670894102,
"grad_norm": 0.9926899671554565,
"learning_rate": 4.7182115802218126e-05,
"loss": 3.2237,
"step": 212
},
{
"epoch": 0.13506658211794548,
"grad_norm": 1.0102065801620483,
"learning_rate": 4.678008342917903e-05,
"loss": 3.1939,
"step": 213
},
{
"epoch": 0.1357006975269499,
"grad_norm": 1.0068438053131104,
"learning_rate": 4.6378259991911886e-05,
"loss": 3.2868,
"step": 214
},
{
"epoch": 0.13633481293595434,
"grad_norm": 0.9734184741973877,
"learning_rate": 4.597667156416371e-05,
"loss": 3.1584,
"step": 215
},
{
"epoch": 0.13696892834495877,
"grad_norm": 15.009803771972656,
"learning_rate": 4.5575344204432084e-05,
"loss": 3.2934,
"step": 216
},
{
"epoch": 0.13760304375396323,
"grad_norm": 0.9599727988243103,
"learning_rate": 4.5174303954274244e-05,
"loss": 3.1804,
"step": 217
},
{
"epoch": 0.13823715916296767,
"grad_norm": 1.007860779762268,
"learning_rate": 4.477357683661734e-05,
"loss": 3.2044,
"step": 218
},
{
"epoch": 0.1388712745719721,
"grad_norm": 0.9852535128593445,
"learning_rate": 4.437318885406973e-05,
"loss": 3.0663,
"step": 219
},
{
"epoch": 0.13950538998097653,
"grad_norm": 0.9651182293891907,
"learning_rate": 4.397316598723385e-05,
"loss": 3.2841,
"step": 220
},
{
"epoch": 0.140139505389981,
"grad_norm": 0.9738379120826721,
"learning_rate": 4.3573534193020274e-05,
"loss": 3.1556,
"step": 221
},
{
"epoch": 0.14077362079898542,
"grad_norm": 1.0059845447540283,
"learning_rate": 4.317431940296343e-05,
"loss": 3.1792,
"step": 222
},
{
"epoch": 0.14140773620798985,
"grad_norm": 1.044323444366455,
"learning_rate": 4.277554752153895e-05,
"loss": 3.029,
"step": 223
},
{
"epoch": 0.14204185161699429,
"grad_norm": 1.0486642122268677,
"learning_rate": 4.237724442448273e-05,
"loss": 3.1771,
"step": 224
},
{
"epoch": 0.14267596702599875,
"grad_norm": 1.0943423509597778,
"learning_rate": 4.197943595711198e-05,
"loss": 3.0797,
"step": 225
},
{
"epoch": 0.14331008243500318,
"grad_norm": 1.032805323600769,
"learning_rate": 4.1582147932648074e-05,
"loss": 3.2204,
"step": 226
},
{
"epoch": 0.1439441978440076,
"grad_norm": 1.0199164152145386,
"learning_rate": 4.118540613054156e-05,
"loss": 3.1531,
"step": 227
},
{
"epoch": 0.14457831325301204,
"grad_norm": 1.031343698501587,
"learning_rate": 4.078923629479943e-05,
"loss": 3.1324,
"step": 228
},
{
"epoch": 0.14521242866201647,
"grad_norm": 1.0214942693710327,
"learning_rate": 4.039366413231458e-05,
"loss": 3.0392,
"step": 229
},
{
"epoch": 0.14584654407102093,
"grad_norm": 1.0384721755981445,
"learning_rate": 3.9998715311197785e-05,
"loss": 3.1602,
"step": 230
},
{
"epoch": 0.14648065948002537,
"grad_norm": 1.062154769897461,
"learning_rate": 3.960441545911204e-05,
"loss": 3.1477,
"step": 231
},
{
"epoch": 0.1471147748890298,
"grad_norm": 1.1326991319656372,
"learning_rate": 3.92107901616097e-05,
"loss": 3.0429,
"step": 232
},
{
"epoch": 0.14774889029803423,
"grad_norm": 1.0269993543624878,
"learning_rate": 3.8817864960472236e-05,
"loss": 3.038,
"step": 233
},
{
"epoch": 0.1483830057070387,
"grad_norm": 1.0221335887908936,
"learning_rate": 3.842566535205286e-05,
"loss": 3.0781,
"step": 234
},
{
"epoch": 0.14901712111604312,
"grad_norm": 1.4944452047348022,
"learning_rate": 3.803421678562213e-05,
"loss": 3.1389,
"step": 235
},
{
"epoch": 0.14965123652504755,
"grad_norm": 1.0359796285629272,
"learning_rate": 3.764354466171652e-05,
"loss": 3.0269,
"step": 236
},
{
"epoch": 0.15028535193405199,
"grad_norm": 1.1374757289886475,
"learning_rate": 3.725367433049033e-05,
"loss": 3.0241,
"step": 237
},
{
"epoch": 0.15091946734305645,
"grad_norm": 1.0923086404800415,
"learning_rate": 3.6864631090070655e-05,
"loss": 2.9826,
"step": 238
},
{
"epoch": 0.15155358275206088,
"grad_norm": 1.085671305656433,
"learning_rate": 3.6476440184915815e-05,
"loss": 3.0977,
"step": 239
},
{
"epoch": 0.1521876981610653,
"grad_norm": 1.0791702270507812,
"learning_rate": 3.608912680417737e-05,
"loss": 2.9639,
"step": 240
},
{
"epoch": 0.15282181357006974,
"grad_norm": 1.1274312734603882,
"learning_rate": 3.570271608006555e-05,
"loss": 2.8279,
"step": 241
},
{
"epoch": 0.1534559289790742,
"grad_norm": 1.0965379476547241,
"learning_rate": 3.531723308621847e-05,
"loss": 2.9191,
"step": 242
},
{
"epoch": 0.15409004438807863,
"grad_norm": 1.2408086061477661,
"learning_rate": 3.493270283607522e-05,
"loss": 2.8455,
"step": 243
},
{
"epoch": 0.15472415979708307,
"grad_norm": 1.1654704809188843,
"learning_rate": 3.4549150281252636e-05,
"loss": 2.8552,
"step": 244
},
{
"epoch": 0.1553582752060875,
"grad_norm": 1.2121531963348389,
"learning_rate": 3.4166600309926387e-05,
"loss": 2.8707,
"step": 245
},
{
"epoch": 0.15599239061509196,
"grad_norm": 1.23041832447052,
"learning_rate": 3.3785077745215873e-05,
"loss": 2.8064,
"step": 246
},
{
"epoch": 0.1566265060240964,
"grad_norm": 1.237444281578064,
"learning_rate": 3.340460734357359e-05,
"loss": 2.5598,
"step": 247
},
{
"epoch": 0.15726062143310082,
"grad_norm": 1.2903790473937988,
"learning_rate": 3.3025213793178646e-05,
"loss": 2.6563,
"step": 248
},
{
"epoch": 0.15789473684210525,
"grad_norm": 1.252323865890503,
"learning_rate": 3.264692171233485e-05,
"loss": 2.4976,
"step": 249
},
{
"epoch": 0.15852885225110971,
"grad_norm": 1.4426056146621704,
"learning_rate": 3.226975564787322e-05,
"loss": 2.5239,
"step": 250
},
{
"epoch": 0.15852885225110971,
"eval_loss": 0.7680420875549316,
"eval_runtime": 299.1281,
"eval_samples_per_second": 8.879,
"eval_steps_per_second": 2.22,
"step": 250
},
{
"epoch": 0.15916296766011415,
"grad_norm": 1.0694372653961182,
"learning_rate": 3.189374007355917e-05,
"loss": 3.8561,
"step": 251
},
{
"epoch": 0.15979708306911858,
"grad_norm": 1.0747451782226562,
"learning_rate": 3.151889938850445e-05,
"loss": 3.1765,
"step": 252
},
{
"epoch": 0.160431198478123,
"grad_norm": 1.0591131448745728,
"learning_rate": 3.114525791558398e-05,
"loss": 3.2664,
"step": 253
},
{
"epoch": 0.16106531388712747,
"grad_norm": 0.950308620929718,
"learning_rate": 3.0772839899857464e-05,
"loss": 3.0991,
"step": 254
},
{
"epoch": 0.1616994292961319,
"grad_norm": 0.9497650265693665,
"learning_rate": 3.0401669506996256e-05,
"loss": 3.3147,
"step": 255
},
{
"epoch": 0.16233354470513633,
"grad_norm": 0.9711596369743347,
"learning_rate": 3.003177082171523e-05,
"loss": 3.3452,
"step": 256
},
{
"epoch": 0.16296766011414077,
"grad_norm": 0.9367339015007019,
"learning_rate": 2.9663167846209998e-05,
"loss": 3.1852,
"step": 257
},
{
"epoch": 0.1636017755231452,
"grad_norm": 0.9764601588249207,
"learning_rate": 2.9295884498599414e-05,
"loss": 3.2691,
"step": 258
},
{
"epoch": 0.16423589093214966,
"grad_norm": 0.958132266998291,
"learning_rate": 2.8929944611373554e-05,
"loss": 3.2167,
"step": 259
},
{
"epoch": 0.1648700063411541,
"grad_norm": 1.0170295238494873,
"learning_rate": 2.8565371929847284e-05,
"loss": 3.2041,
"step": 260
},
{
"epoch": 0.16550412175015852,
"grad_norm": 0.997759222984314,
"learning_rate": 2.8202190110619493e-05,
"loss": 3.2763,
"step": 261
},
{
"epoch": 0.16613823715916295,
"grad_norm": 1.0056345462799072,
"learning_rate": 2.784042272003794e-05,
"loss": 3.263,
"step": 262
},
{
"epoch": 0.16677235256816741,
"grad_norm": 0.9892311692237854,
"learning_rate": 2.7480093232670158e-05,
"loss": 3.0584,
"step": 263
},
{
"epoch": 0.16740646797717185,
"grad_norm": 0.9892306923866272,
"learning_rate": 2.712122502978024e-05,
"loss": 3.2246,
"step": 264
},
{
"epoch": 0.16804058338617628,
"grad_norm": 1.1950269937515259,
"learning_rate": 2.6763841397811573e-05,
"loss": 3.2072,
"step": 265
},
{
"epoch": 0.1686746987951807,
"grad_norm": 1.0062364339828491,
"learning_rate": 2.64079655268759e-05,
"loss": 3.2419,
"step": 266
},
{
"epoch": 0.16930881420418517,
"grad_norm": 0.9735838770866394,
"learning_rate": 2.605362050924848e-05,
"loss": 3.1027,
"step": 267
},
{
"epoch": 0.1699429296131896,
"grad_norm": 1.0225987434387207,
"learning_rate": 2.57008293378697e-05,
"loss": 3.1776,
"step": 268
},
{
"epoch": 0.17057704502219403,
"grad_norm": 1.0058813095092773,
"learning_rate": 2.534961490485313e-05,
"loss": 3.255,
"step": 269
},
{
"epoch": 0.17121116043119847,
"grad_norm": 0.9906468391418457,
"learning_rate": 2.500000000000001e-05,
"loss": 3.1952,
"step": 270
},
{
"epoch": 0.17184527584020293,
"grad_norm": 0.985464870929718,
"learning_rate": 2.4652007309320498e-05,
"loss": 3.1027,
"step": 271
},
{
"epoch": 0.17247939124920736,
"grad_norm": 0.9994264841079712,
"learning_rate": 2.430565941356157e-05,
"loss": 3.1101,
"step": 272
},
{
"epoch": 0.1731135066582118,
"grad_norm": 0.9816799759864807,
"learning_rate": 2.3960978786741877e-05,
"loss": 3.1194,
"step": 273
},
{
"epoch": 0.17374762206721622,
"grad_norm": 1.069852590560913,
"learning_rate": 2.361798779469336e-05,
"loss": 3.1427,
"step": 274
},
{
"epoch": 0.17438173747622068,
"grad_norm": 1.0157891511917114,
"learning_rate": 2.3276708693609943e-05,
"loss": 3.0912,
"step": 275
},
{
"epoch": 0.17501585288522511,
"grad_norm": 1.4053800106048584,
"learning_rate": 2.2937163628603435e-05,
"loss": 3.0539,
"step": 276
},
{
"epoch": 0.17564996829422955,
"grad_norm": 0.9908464550971985,
"learning_rate": 2.259937463226651e-05,
"loss": 3.0635,
"step": 277
},
{
"epoch": 0.17628408370323398,
"grad_norm": 1.0277864933013916,
"learning_rate": 2.2263363623243054e-05,
"loss": 3.0571,
"step": 278
},
{
"epoch": 0.17691819911223844,
"grad_norm": 1.0323623418807983,
"learning_rate": 2.192915240480596e-05,
"loss": 3.1435,
"step": 279
},
{
"epoch": 0.17755231452124287,
"grad_norm": 1.00318443775177,
"learning_rate": 2.1596762663442218e-05,
"loss": 2.9892,
"step": 280
},
{
"epoch": 0.1781864299302473,
"grad_norm": 1.02731454372406,
"learning_rate": 2.1266215967445824e-05,
"loss": 3.0905,
"step": 281
},
{
"epoch": 0.17882054533925174,
"grad_norm": 1.0222443342208862,
"learning_rate": 2.0937533765518187e-05,
"loss": 3.1027,
"step": 282
},
{
"epoch": 0.1794546607482562,
"grad_norm": 1.115246057510376,
"learning_rate": 2.061073738537635e-05,
"loss": 3.0372,
"step": 283
},
{
"epoch": 0.18008877615726063,
"grad_norm": 1.0481406450271606,
"learning_rate": 2.0285848032369137e-05,
"loss": 3.0176,
"step": 284
},
{
"epoch": 0.18072289156626506,
"grad_norm": 1.0405553579330444,
"learning_rate": 1.996288678810105e-05,
"loss": 2.92,
"step": 285
},
{
"epoch": 0.1813570069752695,
"grad_norm": 1.0391370058059692,
"learning_rate": 1.9641874609064443e-05,
"loss": 2.9469,
"step": 286
},
{
"epoch": 0.18199112238427395,
"grad_norm": 1.0537941455841064,
"learning_rate": 1.932283232527956e-05,
"loss": 3.0066,
"step": 287
},
{
"epoch": 0.18262523779327838,
"grad_norm": 1.1033146381378174,
"learning_rate": 1.9005780638942982e-05,
"loss": 3.1014,
"step": 288
},
{
"epoch": 0.18325935320228282,
"grad_norm": 1.0815560817718506,
"learning_rate": 1.8690740123084316e-05,
"loss": 2.9473,
"step": 289
},
{
"epoch": 0.18389346861128725,
"grad_norm": 1.1662791967391968,
"learning_rate": 1.837773122023114e-05,
"loss": 3.0494,
"step": 290
},
{
"epoch": 0.18452758402029168,
"grad_norm": 1.1125469207763672,
"learning_rate": 1.8066774241082612e-05,
"loss": 3.1307,
"step": 291
},
{
"epoch": 0.18516169942929614,
"grad_norm": 1.1446367502212524,
"learning_rate": 1.7757889363191483e-05,
"loss": 2.9742,
"step": 292
},
{
"epoch": 0.18579581483830057,
"grad_norm": 1.1044974327087402,
"learning_rate": 1.745109662965481e-05,
"loss": 2.8596,
"step": 293
},
{
"epoch": 0.186429930247305,
"grad_norm": 1.292734980583191,
"learning_rate": 1.714641594781347e-05,
"loss": 2.9186,
"step": 294
},
{
"epoch": 0.18706404565630944,
"grad_norm": 1.1280649900436401,
"learning_rate": 1.684386708796025e-05,
"loss": 2.7486,
"step": 295
},
{
"epoch": 0.1876981610653139,
"grad_norm": 1.157607913017273,
"learning_rate": 1.6543469682057106e-05,
"loss": 2.6953,
"step": 296
},
{
"epoch": 0.18833227647431833,
"grad_norm": 1.1997601985931396,
"learning_rate": 1.62452432224612e-05,
"loss": 2.5338,
"step": 297
},
{
"epoch": 0.18896639188332276,
"grad_norm": 1.5699822902679443,
"learning_rate": 1.5949207060660138e-05,
"loss": 2.728,
"step": 298
},
{
"epoch": 0.1896005072923272,
"grad_norm": 1.2640992403030396,
"learning_rate": 1.5655380406016235e-05,
"loss": 2.4079,
"step": 299
},
{
"epoch": 0.19023462270133165,
"grad_norm": 1.4184346199035645,
"learning_rate": 1.536378232452003e-05,
"loss": 2.5856,
"step": 300
},
{
"epoch": 0.19023462270133165,
"eval_loss": 0.7586191296577454,
"eval_runtime": 299.2736,
"eval_samples_per_second": 8.875,
"eval_steps_per_second": 2.219,
"step": 300
},
{
"epoch": 0.19086873811033608,
"grad_norm": 1.0150848627090454,
"learning_rate": 1.5074431737553157e-05,
"loss": 3.2563,
"step": 301
},
{
"epoch": 0.19150285351934052,
"grad_norm": 1.2016209363937378,
"learning_rate": 1.4787347420660541e-05,
"loss": 3.2713,
"step": 302
},
{
"epoch": 0.19213696892834495,
"grad_norm": 1.01749849319458,
"learning_rate": 1.4502548002332088e-05,
"loss": 3.182,
"step": 303
},
{
"epoch": 0.1927710843373494,
"grad_norm": 0.988182783126831,
"learning_rate": 1.422005196279395e-05,
"loss": 3.1752,
"step": 304
},
{
"epoch": 0.19340519974635384,
"grad_norm": 0.9728803038597107,
"learning_rate": 1.3939877632809278e-05,
"loss": 3.2968,
"step": 305
},
{
"epoch": 0.19403931515535827,
"grad_norm": 0.935308039188385,
"learning_rate": 1.3662043192488849e-05,
"loss": 3.0506,
"step": 306
},
{
"epoch": 0.1946734305643627,
"grad_norm": 0.9322471618652344,
"learning_rate": 1.338656667011134e-05,
"loss": 3.2073,
"step": 307
},
{
"epoch": 0.19530754597336716,
"grad_norm": 0.9291597008705139,
"learning_rate": 1.3113465940953495e-05,
"loss": 3.1834,
"step": 308
},
{
"epoch": 0.1959416613823716,
"grad_norm": 0.927598774433136,
"learning_rate": 1.2842758726130283e-05,
"loss": 3.1462,
"step": 309
},
{
"epoch": 0.19657577679137603,
"grad_norm": 0.9430285096168518,
"learning_rate": 1.257446259144494e-05,
"loss": 3.1203,
"step": 310
},
{
"epoch": 0.19720989220038046,
"grad_norm": 1.4846646785736084,
"learning_rate": 1.2308594946249163e-05,
"loss": 3.1325,
"step": 311
},
{
"epoch": 0.19784400760938492,
"grad_norm": 0.9747286438941956,
"learning_rate": 1.204517304231343e-05,
"loss": 3.0351,
"step": 312
},
{
"epoch": 0.19847812301838935,
"grad_norm": 0.9820675849914551,
"learning_rate": 1.178421397270758e-05,
"loss": 3.1296,
"step": 313
},
{
"epoch": 0.19911223842739378,
"grad_norm": 0.9933830499649048,
"learning_rate": 1.1525734670691701e-05,
"loss": 3.1196,
"step": 314
},
{
"epoch": 0.19974635383639822,
"grad_norm": 0.9653772711753845,
"learning_rate": 1.1269751908617277e-05,
"loss": 3.0227,
"step": 315
},
{
"epoch": 0.20038046924540268,
"grad_norm": 1.0138531923294067,
"learning_rate": 1.1016282296838887e-05,
"loss": 3.0659,
"step": 316
},
{
"epoch": 0.2010145846544071,
"grad_norm": 0.9920913577079773,
"learning_rate": 1.0765342282636416e-05,
"loss": 3.1804,
"step": 317
},
{
"epoch": 0.20164870006341154,
"grad_norm": 0.9812908172607422,
"learning_rate": 1.0516948149147754e-05,
"loss": 3.1213,
"step": 318
},
{
"epoch": 0.20228281547241597,
"grad_norm": 1.0075311660766602,
"learning_rate": 1.0271116014312293e-05,
"loss": 3.1012,
"step": 319
},
{
"epoch": 0.20291693088142043,
"grad_norm": 1.024266242980957,
"learning_rate": 1.0027861829824952e-05,
"loss": 3.1247,
"step": 320
},
{
"epoch": 0.20355104629042486,
"grad_norm": 0.9915530681610107,
"learning_rate": 9.787201380101157e-06,
"loss": 3.1249,
"step": 321
},
{
"epoch": 0.2041851616994293,
"grad_norm": 1.0081713199615479,
"learning_rate": 9.549150281252633e-06,
"loss": 3.044,
"step": 322
},
{
"epoch": 0.20481927710843373,
"grad_norm": 1.0550603866577148,
"learning_rate": 9.313723980074018e-06,
"loss": 3.2099,
"step": 323
},
{
"epoch": 0.20545339251743816,
"grad_norm": 1.0586062669754028,
"learning_rate": 9.080937753040646e-06,
"loss": 3.2203,
"step": 324
},
{
"epoch": 0.20608750792644262,
"grad_norm": 0.9967835545539856,
"learning_rate": 8.850806705317183e-06,
"loss": 3.0307,
"step": 325
},
{
"epoch": 0.20672162333544705,
"grad_norm": 1.0282031297683716,
"learning_rate": 8.623345769777514e-06,
"loss": 3.0631,
"step": 326
},
{
"epoch": 0.20735573874445148,
"grad_norm": 0.9924097061157227,
"learning_rate": 8.398569706035792e-06,
"loss": 2.9257,
"step": 327
},
{
"epoch": 0.20798985415345592,
"grad_norm": 1.0178431272506714,
"learning_rate": 8.176493099488663e-06,
"loss": 3.0914,
"step": 328
},
{
"epoch": 0.20862396956246038,
"grad_norm": 1.0140167474746704,
"learning_rate": 7.957130360368898e-06,
"loss": 3.1761,
"step": 329
},
{
"epoch": 0.2092580849714648,
"grad_norm": 1.1238445043563843,
"learning_rate": 7.740495722810271e-06,
"loss": 2.9859,
"step": 330
},
{
"epoch": 0.20989220038046924,
"grad_norm": 1.0198321342468262,
"learning_rate": 7.526603243923957e-06,
"loss": 3.1234,
"step": 331
},
{
"epoch": 0.21052631578947367,
"grad_norm": 1.0335346460342407,
"learning_rate": 7.315466802886401e-06,
"loss": 3.0115,
"step": 332
},
{
"epoch": 0.21116043119847813,
"grad_norm": 1.0279099941253662,
"learning_rate": 7.107100100038671e-06,
"loss": 3.0316,
"step": 333
},
{
"epoch": 0.21179454660748256,
"grad_norm": 1.0411081314086914,
"learning_rate": 6.901516655997536e-06,
"loss": 3.0541,
"step": 334
},
{
"epoch": 0.212428662016487,
"grad_norm": 1.0638669729232788,
"learning_rate": 6.698729810778065e-06,
"loss": 3.1495,
"step": 335
},
{
"epoch": 0.21306277742549143,
"grad_norm": 1.0645053386688232,
"learning_rate": 6.498752722928042e-06,
"loss": 3.13,
"step": 336
},
{
"epoch": 0.2136968928344959,
"grad_norm": 1.1958521604537964,
"learning_rate": 6.301598368674105e-06,
"loss": 2.9266,
"step": 337
},
{
"epoch": 0.21433100824350032,
"grad_norm": 1.0360051393508911,
"learning_rate": 6.107279541079769e-06,
"loss": 3.0758,
"step": 338
},
{
"epoch": 0.21496512365250475,
"grad_norm": 1.070273518562317,
"learning_rate": 5.915808849215304e-06,
"loss": 3.0503,
"step": 339
},
{
"epoch": 0.21559923906150918,
"grad_norm": 1.0638577938079834,
"learning_rate": 5.727198717339511e-06,
"loss": 2.9783,
"step": 340
},
{
"epoch": 0.21623335447051364,
"grad_norm": 1.0641688108444214,
"learning_rate": 5.54146138409355e-06,
"loss": 3.0227,
"step": 341
},
{
"epoch": 0.21686746987951808,
"grad_norm": 1.0784544944763184,
"learning_rate": 5.358608901706802e-06,
"loss": 2.9654,
"step": 342
},
{
"epoch": 0.2175015852885225,
"grad_norm": 1.1119335889816284,
"learning_rate": 5.178653135214812e-06,
"loss": 2.8726,
"step": 343
},
{
"epoch": 0.21813570069752694,
"grad_norm": 1.095287799835205,
"learning_rate": 5.001605761689398e-06,
"loss": 2.8824,
"step": 344
},
{
"epoch": 0.2187698161065314,
"grad_norm": 1.1402324438095093,
"learning_rate": 4.827478269480895e-06,
"loss": 2.8718,
"step": 345
},
{
"epoch": 0.21940393151553583,
"grad_norm": 1.1678264141082764,
"learning_rate": 4.65628195747273e-06,
"loss": 2.6562,
"step": 346
},
{
"epoch": 0.22003804692454026,
"grad_norm": 1.156401515007019,
"learning_rate": 4.488027934348271e-06,
"loss": 2.624,
"step": 347
},
{
"epoch": 0.2206721623335447,
"grad_norm": 1.2031807899475098,
"learning_rate": 4.322727117869951e-06,
"loss": 2.465,
"step": 348
},
{
"epoch": 0.22130627774254916,
"grad_norm": 1.2508282661437988,
"learning_rate": 4.16039023417088e-06,
"loss": 2.4946,
"step": 349
},
{
"epoch": 0.2219403931515536,
"grad_norm": 1.4422476291656494,
"learning_rate": 4.001027817058789e-06,
"loss": 2.5989,
"step": 350
},
{
"epoch": 0.2219403931515536,
"eval_loss": 0.7529736161231995,
"eval_runtime": 298.6761,
"eval_samples_per_second": 8.893,
"eval_steps_per_second": 2.223,
"step": 350
},
{
"epoch": 0.22257450856055802,
"grad_norm": 0.920236349105835,
"learning_rate": 3.844650207332562e-06,
"loss": 3.2697,
"step": 351
},
{
"epoch": 0.22320862396956245,
"grad_norm": 0.9723142385482788,
"learning_rate": 3.691267552111183e-06,
"loss": 3.1764,
"step": 352
},
{
"epoch": 0.2238427393785669,
"grad_norm": 0.9397497177124023,
"learning_rate": 3.54088980417534e-06,
"loss": 3.067,
"step": 353
},
{
"epoch": 0.22447685478757134,
"grad_norm": 0.9813097715377808,
"learning_rate": 3.393526721321616e-06,
"loss": 3.319,
"step": 354
},
{
"epoch": 0.22511097019657578,
"grad_norm": 0.9807033538818359,
"learning_rate": 3.249187865729264e-06,
"loss": 3.1667,
"step": 355
},
{
"epoch": 0.2257450856055802,
"grad_norm": 0.9774824976921082,
"learning_rate": 3.1078826033397843e-06,
"loss": 3.1495,
"step": 356
},
{
"epoch": 0.22637920101458464,
"grad_norm": 0.9869019389152527,
"learning_rate": 2.9696201032491434e-06,
"loss": 3.1348,
"step": 357
},
{
"epoch": 0.2270133164235891,
"grad_norm": 0.9642476439476013,
"learning_rate": 2.8344093371128424e-06,
"loss": 3.0795,
"step": 358
},
{
"epoch": 0.22764743183259353,
"grad_norm": 0.9823551177978516,
"learning_rate": 2.70225907856374e-06,
"loss": 3.21,
"step": 359
},
{
"epoch": 0.22828154724159797,
"grad_norm": 0.9760318398475647,
"learning_rate": 2.573177902642726e-06,
"loss": 3.1709,
"step": 360
},
{
"epoch": 0.2289156626506024,
"grad_norm": 0.947401762008667,
"learning_rate": 2.4471741852423237e-06,
"loss": 3.1004,
"step": 361
},
{
"epoch": 0.22954977805960686,
"grad_norm": 0.9643566012382507,
"learning_rate": 2.324256102563188e-06,
"loss": 3.1597,
"step": 362
},
{
"epoch": 0.2301838934686113,
"grad_norm": 0.970553994178772,
"learning_rate": 2.204431630583548e-06,
"loss": 3.0536,
"step": 363
},
{
"epoch": 0.23081800887761572,
"grad_norm": 0.9732277393341064,
"learning_rate": 2.087708544541689e-06,
"loss": 3.1504,
"step": 364
},
{
"epoch": 0.23145212428662015,
"grad_norm": 0.9547330141067505,
"learning_rate": 1.974094418431388e-06,
"loss": 3.0721,
"step": 365
},
{
"epoch": 0.2320862396956246,
"grad_norm": 0.9943172931671143,
"learning_rate": 1.8635966245104664e-06,
"loss": 3.1663,
"step": 366
},
{
"epoch": 0.23272035510462905,
"grad_norm": 0.9682246446609497,
"learning_rate": 1.7562223328224325e-06,
"loss": 3.0412,
"step": 367
},
{
"epoch": 0.23335447051363348,
"grad_norm": 0.9810298085212708,
"learning_rate": 1.6519785107311891e-06,
"loss": 3.219,
"step": 368
},
{
"epoch": 0.2339885859226379,
"grad_norm": 0.972686767578125,
"learning_rate": 1.5508719224689717e-06,
"loss": 3.0453,
"step": 369
},
{
"epoch": 0.23462270133164237,
"grad_norm": 0.9679805040359497,
"learning_rate": 1.4529091286973995e-06,
"loss": 3.0846,
"step": 370
},
{
"epoch": 0.2352568167406468,
"grad_norm": 1.3851791620254517,
"learning_rate": 1.358096486081778e-06,
"loss": 3.0721,
"step": 371
},
{
"epoch": 0.23589093214965123,
"grad_norm": 0.9714539647102356,
"learning_rate": 1.2664401468786114e-06,
"loss": 3.057,
"step": 372
},
{
"epoch": 0.23652504755865567,
"grad_norm": 1.0152660608291626,
"learning_rate": 1.1779460585363944e-06,
"loss": 3.1163,
"step": 373
},
{
"epoch": 0.23715916296766013,
"grad_norm": 0.9702284932136536,
"learning_rate": 1.0926199633097157e-06,
"loss": 2.9209,
"step": 374
},
{
"epoch": 0.23779327837666456,
"grad_norm": 1.0239858627319336,
"learning_rate": 1.0104673978866164e-06,
"loss": 3.0793,
"step": 375
},
{
"epoch": 0.238427393785669,
"grad_norm": 1.0014803409576416,
"learning_rate": 9.314936930293283e-07,
"loss": 3.1433,
"step": 376
},
{
"epoch": 0.23906150919467342,
"grad_norm": 1.0843709707260132,
"learning_rate": 8.557039732283944e-07,
"loss": 3.0487,
"step": 377
},
{
"epoch": 0.23969562460367788,
"grad_norm": 1.011506199836731,
"learning_rate": 7.83103156370113e-07,
"loss": 2.96,
"step": 378
},
{
"epoch": 0.2403297400126823,
"grad_norm": 1.371131420135498,
"learning_rate": 7.136959534174592e-07,
"loss": 3.0234,
"step": 379
},
{
"epoch": 0.24096385542168675,
"grad_norm": 1.011451005935669,
"learning_rate": 6.474868681043578e-07,
"loss": 3.1524,
"step": 380
},
{
"epoch": 0.24159797083069118,
"grad_norm": 1.0045251846313477,
"learning_rate": 5.844801966434832e-07,
"loss": 3.0862,
"step": 381
},
{
"epoch": 0.24223208623969564,
"grad_norm": 1.0423835515975952,
"learning_rate": 5.246800274474439e-07,
"loss": 3.1457,
"step": 382
},
{
"epoch": 0.24286620164870007,
"grad_norm": 1.0296357870101929,
"learning_rate": 4.680902408635335e-07,
"loss": 3.0892,
"step": 383
},
{
"epoch": 0.2435003170577045,
"grad_norm": 1.0572096109390259,
"learning_rate": 4.1471450892189846e-07,
"loss": 3.1505,
"step": 384
},
{
"epoch": 0.24413443246670893,
"grad_norm": 1.027784824371338,
"learning_rate": 3.6455629509730136e-07,
"loss": 3.0082,
"step": 385
},
{
"epoch": 0.2447685478757134,
"grad_norm": 1.03461754322052,
"learning_rate": 3.1761885408435054e-07,
"loss": 3.0415,
"step": 386
},
{
"epoch": 0.24540266328471783,
"grad_norm": 1.0629018545150757,
"learning_rate": 2.7390523158633554e-07,
"loss": 3.1103,
"step": 387
},
{
"epoch": 0.24603677869372226,
"grad_norm": 1.0713993310928345,
"learning_rate": 2.334182641175686e-07,
"loss": 3.1734,
"step": 388
},
{
"epoch": 0.2466708941027267,
"grad_norm": 1.0982544422149658,
"learning_rate": 1.9616057881935436e-07,
"loss": 2.9949,
"step": 389
},
{
"epoch": 0.24730500951173112,
"grad_norm": 1.064578890800476,
"learning_rate": 1.6213459328950352e-07,
"loss": 2.8567,
"step": 390
},
{
"epoch": 0.24793912492073558,
"grad_norm": 1.0758435726165771,
"learning_rate": 1.3134251542544774e-07,
"loss": 2.9331,
"step": 391
},
{
"epoch": 0.24857324032974001,
"grad_norm": 1.1049171686172485,
"learning_rate": 1.0378634328099269e-07,
"loss": 2.8457,
"step": 392
},
{
"epoch": 0.24920735573874445,
"grad_norm": 1.0962225198745728,
"learning_rate": 7.946786493666647e-08,
"loss": 2.8366,
"step": 393
},
{
"epoch": 0.24984147114774888,
"grad_norm": 1.1087645292282104,
"learning_rate": 5.838865838366792e-08,
"loss": 2.6736,
"step": 394
},
{
"epoch": 0.25047558655675334,
"grad_norm": 1.1320421695709229,
"learning_rate": 4.055009142152067e-08,
"loss": 2.5256,
"step": 395
},
{
"epoch": 0.25110970196575777,
"grad_norm": 1.1543631553649902,
"learning_rate": 2.595332156925534e-08,
"loss": 2.6751,
"step": 396
},
{
"epoch": 0.2517438173747622,
"grad_norm": 1.2271840572357178,
"learning_rate": 1.4599295990352924e-08,
"loss": 2.4729,
"step": 397
},
{
"epoch": 0.25237793278376663,
"grad_norm": 1.232959270477295,
"learning_rate": 6.488751431266149e-09,
"loss": 2.5886,
"step": 398
},
{
"epoch": 0.25301204819277107,
"grad_norm": 1.39299476146698,
"learning_rate": 1.622214173602199e-09,
"loss": 2.5761,
"step": 399
},
{
"epoch": 0.2536461636017755,
"grad_norm": 1.4202094078063965,
"learning_rate": 0.0,
"loss": 2.5205,
"step": 400
},
{
"epoch": 0.2536461636017755,
"eval_loss": 0.7515735626220703,
"eval_runtime": 299.3765,
"eval_samples_per_second": 8.872,
"eval_steps_per_second": 2.218,
"step": 400
}
],
"logging_steps": 1,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.35326756330537e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}