{
"best_metric": 0.007014184258878231,
"best_model_checkpoint": "/home/paperspace/Data/models/centime/llm3br256/checkpoint-800",
"epoch": 4.468599033816425,
"eval_steps": 25,
"global_step": 925,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004830917874396135,
"grad_norm": 0.631982147693634,
"learning_rate": 1.9305019305019306e-07,
"loss": 0.1809,
"step": 1
},
{
"epoch": 0.00966183574879227,
"grad_norm": 0.5283491015434265,
"learning_rate": 3.8610038610038613e-07,
"loss": 0.17,
"step": 2
},
{
"epoch": 0.014492753623188406,
"grad_norm": 0.5738242864608765,
"learning_rate": 5.791505791505791e-07,
"loss": 0.1893,
"step": 3
},
{
"epoch": 0.01932367149758454,
"grad_norm": 0.5831884741783142,
"learning_rate": 7.722007722007723e-07,
"loss": 0.1764,
"step": 4
},
{
"epoch": 0.024154589371980676,
"grad_norm": 0.5098283886909485,
"learning_rate": 9.652509652509653e-07,
"loss": 0.1701,
"step": 5
},
{
"epoch": 0.028985507246376812,
"grad_norm": 0.5736549496650696,
"learning_rate": 1.1583011583011583e-06,
"loss": 0.1891,
"step": 6
},
{
"epoch": 0.033816425120772944,
"grad_norm": 0.5005951523780823,
"learning_rate": 1.3513513513513515e-06,
"loss": 0.164,
"step": 7
},
{
"epoch": 0.03864734299516908,
"grad_norm": 0.591622531414032,
"learning_rate": 1.5444015444015445e-06,
"loss": 0.1737,
"step": 8
},
{
"epoch": 0.043478260869565216,
"grad_norm": 0.5805624127388,
"learning_rate": 1.7374517374517375e-06,
"loss": 0.1965,
"step": 9
},
{
"epoch": 0.04830917874396135,
"grad_norm": 0.491947740316391,
"learning_rate": 1.9305019305019305e-06,
"loss": 0.1741,
"step": 10
},
{
"epoch": 0.05314009661835749,
"grad_norm": 0.431071937084198,
"learning_rate": 2.1235521235521236e-06,
"loss": 0.1531,
"step": 11
},
{
"epoch": 0.057971014492753624,
"grad_norm": 0.46217894554138184,
"learning_rate": 2.3166023166023166e-06,
"loss": 0.1544,
"step": 12
},
{
"epoch": 0.06280193236714976,
"grad_norm": 0.4673991799354553,
"learning_rate": 2.5096525096525096e-06,
"loss": 0.1599,
"step": 13
},
{
"epoch": 0.06763285024154589,
"grad_norm": 0.4075062572956085,
"learning_rate": 2.702702702702703e-06,
"loss": 0.1733,
"step": 14
},
{
"epoch": 0.07246376811594203,
"grad_norm": 0.38460931181907654,
"learning_rate": 2.895752895752896e-06,
"loss": 0.1754,
"step": 15
},
{
"epoch": 0.07729468599033816,
"grad_norm": 0.3275028169155121,
"learning_rate": 3.088803088803089e-06,
"loss": 0.1412,
"step": 16
},
{
"epoch": 0.0821256038647343,
"grad_norm": 0.3470560908317566,
"learning_rate": 3.2818532818532816e-06,
"loss": 0.1558,
"step": 17
},
{
"epoch": 0.08695652173913043,
"grad_norm": 0.3038932979106903,
"learning_rate": 3.474903474903475e-06,
"loss": 0.1467,
"step": 18
},
{
"epoch": 0.09178743961352658,
"grad_norm": 0.34106558561325073,
"learning_rate": 3.6679536679536685e-06,
"loss": 0.1603,
"step": 19
},
{
"epoch": 0.0966183574879227,
"grad_norm": 0.27034294605255127,
"learning_rate": 3.861003861003861e-06,
"loss": 0.1334,
"step": 20
},
{
"epoch": 0.10144927536231885,
"grad_norm": 0.2735786437988281,
"learning_rate": 4.0540540540540545e-06,
"loss": 0.1411,
"step": 21
},
{
"epoch": 0.10628019323671498,
"grad_norm": 0.26440420746803284,
"learning_rate": 4.247104247104247e-06,
"loss": 0.1448,
"step": 22
},
{
"epoch": 0.1111111111111111,
"grad_norm": 0.2545959949493408,
"learning_rate": 4.4401544401544405e-06,
"loss": 0.1352,
"step": 23
},
{
"epoch": 0.11594202898550725,
"grad_norm": 0.31827524304389954,
"learning_rate": 4.633204633204633e-06,
"loss": 0.1509,
"step": 24
},
{
"epoch": 0.12077294685990338,
"grad_norm": 0.2550322115421295,
"learning_rate": 4.8262548262548266e-06,
"loss": 0.1159,
"step": 25
},
{
"epoch": 0.12077294685990338,
"eval_loss": 0.10044483840465546,
"eval_runtime": 21.3847,
"eval_samples_per_second": 4.676,
"eval_steps_per_second": 0.14,
"step": 25
},
{
"epoch": 0.12560386473429952,
"grad_norm": 0.312794029712677,
"learning_rate": 5.019305019305019e-06,
"loss": 0.1705,
"step": 26
},
{
"epoch": 0.13043478260869565,
"grad_norm": 0.2570686936378479,
"learning_rate": 5.212355212355213e-06,
"loss": 0.1173,
"step": 27
},
{
"epoch": 0.13526570048309178,
"grad_norm": 0.1603628247976303,
"learning_rate": 5.405405405405406e-06,
"loss": 0.0852,
"step": 28
},
{
"epoch": 0.14009661835748793,
"grad_norm": 0.19329562783241272,
"learning_rate": 5.598455598455599e-06,
"loss": 0.1189,
"step": 29
},
{
"epoch": 0.14492753623188406,
"grad_norm": 0.19762450456619263,
"learning_rate": 5.791505791505792e-06,
"loss": 0.1149,
"step": 30
},
{
"epoch": 0.1497584541062802,
"grad_norm": 0.1888991743326187,
"learning_rate": 5.984555984555985e-06,
"loss": 0.1176,
"step": 31
},
{
"epoch": 0.15458937198067632,
"grad_norm": 0.20226870477199554,
"learning_rate": 6.177606177606178e-06,
"loss": 0.1362,
"step": 32
},
{
"epoch": 0.15942028985507245,
"grad_norm": 0.1509280502796173,
"learning_rate": 6.370656370656371e-06,
"loss": 0.0817,
"step": 33
},
{
"epoch": 0.1642512077294686,
"grad_norm": 0.1972505748271942,
"learning_rate": 6.563706563706563e-06,
"loss": 0.134,
"step": 34
},
{
"epoch": 0.16908212560386474,
"grad_norm": 0.18301893770694733,
"learning_rate": 6.7567567567567575e-06,
"loss": 0.1113,
"step": 35
},
{
"epoch": 0.17391304347826086,
"grad_norm": 0.15621688961982727,
"learning_rate": 6.94980694980695e-06,
"loss": 0.106,
"step": 36
},
{
"epoch": 0.178743961352657,
"grad_norm": 0.16429874300956726,
"learning_rate": 7.142857142857143e-06,
"loss": 0.1061,
"step": 37
},
{
"epoch": 0.18357487922705315,
"grad_norm": 0.1622830629348755,
"learning_rate": 7.335907335907337e-06,
"loss": 0.0986,
"step": 38
},
{
"epoch": 0.18840579710144928,
"grad_norm": 0.1767168790102005,
"learning_rate": 7.52895752895753e-06,
"loss": 0.1014,
"step": 39
},
{
"epoch": 0.1932367149758454,
"grad_norm": 0.1411493867635727,
"learning_rate": 7.722007722007722e-06,
"loss": 0.0862,
"step": 40
},
{
"epoch": 0.19806763285024154,
"grad_norm": 0.15203985571861267,
"learning_rate": 7.915057915057915e-06,
"loss": 0.0955,
"step": 41
},
{
"epoch": 0.2028985507246377,
"grad_norm": 0.13702470064163208,
"learning_rate": 8.108108108108109e-06,
"loss": 0.0921,
"step": 42
},
{
"epoch": 0.20772946859903382,
"grad_norm": 0.14217808842658997,
"learning_rate": 8.301158301158302e-06,
"loss": 0.0855,
"step": 43
},
{
"epoch": 0.21256038647342995,
"grad_norm": 0.1266465187072754,
"learning_rate": 8.494208494208494e-06,
"loss": 0.091,
"step": 44
},
{
"epoch": 0.21739130434782608,
"grad_norm": 0.1271313726902008,
"learning_rate": 8.687258687258689e-06,
"loss": 0.0759,
"step": 45
},
{
"epoch": 0.2222222222222222,
"grad_norm": 0.15804509818553925,
"learning_rate": 8.880308880308881e-06,
"loss": 0.0966,
"step": 46
},
{
"epoch": 0.22705314009661837,
"grad_norm": 0.12065310776233673,
"learning_rate": 9.073359073359074e-06,
"loss": 0.0736,
"step": 47
},
{
"epoch": 0.2318840579710145,
"grad_norm": 0.13479556143283844,
"learning_rate": 9.266409266409266e-06,
"loss": 0.0885,
"step": 48
},
{
"epoch": 0.23671497584541062,
"grad_norm": 0.12735073268413544,
"learning_rate": 9.45945945945946e-06,
"loss": 0.059,
"step": 49
},
{
"epoch": 0.24154589371980675,
"grad_norm": 0.13692739605903625,
"learning_rate": 9.652509652509653e-06,
"loss": 0.0843,
"step": 50
},
{
"epoch": 0.24154589371980675,
"eval_loss": 0.06345108151435852,
"eval_runtime": 20.328,
"eval_samples_per_second": 4.919,
"eval_steps_per_second": 0.148,
"step": 50
},
{
"epoch": 0.2463768115942029,
"grad_norm": 0.1594228297472,
"learning_rate": 9.845559845559846e-06,
"loss": 0.092,
"step": 51
},
{
"epoch": 0.25120772946859904,
"grad_norm": 0.11453366279602051,
"learning_rate": 1.0038610038610038e-05,
"loss": 0.0802,
"step": 52
},
{
"epoch": 0.2560386473429952,
"grad_norm": 0.12839704751968384,
"learning_rate": 1.0231660231660233e-05,
"loss": 0.0787,
"step": 53
},
{
"epoch": 0.2608695652173913,
"grad_norm": 0.10960253328084946,
"learning_rate": 1.0424710424710425e-05,
"loss": 0.0776,
"step": 54
},
{
"epoch": 0.26570048309178745,
"grad_norm": 0.1108475923538208,
"learning_rate": 1.0617760617760618e-05,
"loss": 0.0655,
"step": 55
},
{
"epoch": 0.27053140096618356,
"grad_norm": 0.12586434185504913,
"learning_rate": 1.0810810810810812e-05,
"loss": 0.0841,
"step": 56
},
{
"epoch": 0.2753623188405797,
"grad_norm": 0.1350884884595871,
"learning_rate": 1.1003861003861005e-05,
"loss": 0.0649,
"step": 57
},
{
"epoch": 0.28019323671497587,
"grad_norm": 0.12750767171382904,
"learning_rate": 1.1196911196911197e-05,
"loss": 0.1061,
"step": 58
},
{
"epoch": 0.28502415458937197,
"grad_norm": 0.1260327398777008,
"learning_rate": 1.138996138996139e-05,
"loss": 0.0819,
"step": 59
},
{
"epoch": 0.2898550724637681,
"grad_norm": 0.12157632410526276,
"learning_rate": 1.1583011583011584e-05,
"loss": 0.0744,
"step": 60
},
{
"epoch": 0.2946859903381642,
"grad_norm": 0.10264753550291061,
"learning_rate": 1.1776061776061777e-05,
"loss": 0.0695,
"step": 61
},
{
"epoch": 0.2995169082125604,
"grad_norm": 0.1185842826962471,
"learning_rate": 1.196911196911197e-05,
"loss": 0.0825,
"step": 62
},
{
"epoch": 0.30434782608695654,
"grad_norm": 0.14522415399551392,
"learning_rate": 1.2162162162162164e-05,
"loss": 0.0796,
"step": 63
},
{
"epoch": 0.30917874396135264,
"grad_norm": 0.1346604973077774,
"learning_rate": 1.2355212355212356e-05,
"loss": 0.0782,
"step": 64
},
{
"epoch": 0.3140096618357488,
"grad_norm": 0.10661154985427856,
"learning_rate": 1.2548262548262549e-05,
"loss": 0.0848,
"step": 65
},
{
"epoch": 0.3188405797101449,
"grad_norm": 0.09096452593803406,
"learning_rate": 1.2741312741312741e-05,
"loss": 0.0666,
"step": 66
},
{
"epoch": 0.32367149758454106,
"grad_norm": 0.11585734784603119,
"learning_rate": 1.2934362934362934e-05,
"loss": 0.0518,
"step": 67
},
{
"epoch": 0.3285024154589372,
"grad_norm": 0.102788545191288,
"learning_rate": 1.3127413127413127e-05,
"loss": 0.0664,
"step": 68
},
{
"epoch": 0.3333333333333333,
"grad_norm": 0.1031409502029419,
"learning_rate": 1.3320463320463322e-05,
"loss": 0.071,
"step": 69
},
{
"epoch": 0.33816425120772947,
"grad_norm": 0.1045219823718071,
"learning_rate": 1.3513513513513515e-05,
"loss": 0.0658,
"step": 70
},
{
"epoch": 0.34299516908212563,
"grad_norm": 0.09023850411176682,
"learning_rate": 1.3706563706563708e-05,
"loss": 0.0517,
"step": 71
},
{
"epoch": 0.34782608695652173,
"grad_norm": 0.08952303230762482,
"learning_rate": 1.38996138996139e-05,
"loss": 0.0534,
"step": 72
},
{
"epoch": 0.3526570048309179,
"grad_norm": 0.09404554218053818,
"learning_rate": 1.4092664092664093e-05,
"loss": 0.0652,
"step": 73
},
{
"epoch": 0.357487922705314,
"grad_norm": 0.09435431659221649,
"learning_rate": 1.4285714285714285e-05,
"loss": 0.0598,
"step": 74
},
{
"epoch": 0.36231884057971014,
"grad_norm": 0.13001886010169983,
"learning_rate": 1.4478764478764478e-05,
"loss": 0.0763,
"step": 75
},
{
"epoch": 0.36231884057971014,
"eval_loss": 0.04738979786634445,
"eval_runtime": 20.3171,
"eval_samples_per_second": 4.922,
"eval_steps_per_second": 0.148,
"step": 75
},
{
"epoch": 0.3671497584541063,
"grad_norm": 0.10421048104763031,
"learning_rate": 1.4671814671814674e-05,
"loss": 0.0686,
"step": 76
},
{
"epoch": 0.3719806763285024,
"grad_norm": 0.0982724204659462,
"learning_rate": 1.4864864864864867e-05,
"loss": 0.052,
"step": 77
},
{
"epoch": 0.37681159420289856,
"grad_norm": 0.11460539698600769,
"learning_rate": 1.505791505791506e-05,
"loss": 0.0632,
"step": 78
},
{
"epoch": 0.38164251207729466,
"grad_norm": 0.0933605507016182,
"learning_rate": 1.5250965250965252e-05,
"loss": 0.0479,
"step": 79
},
{
"epoch": 0.3864734299516908,
"grad_norm": 0.11033626645803452,
"learning_rate": 1.5444015444015444e-05,
"loss": 0.059,
"step": 80
},
{
"epoch": 0.391304347826087,
"grad_norm": 0.1291951686143875,
"learning_rate": 1.5637065637065637e-05,
"loss": 0.073,
"step": 81
},
{
"epoch": 0.3961352657004831,
"grad_norm": 0.11451836675405502,
"learning_rate": 1.583011583011583e-05,
"loss": 0.06,
"step": 82
},
{
"epoch": 0.40096618357487923,
"grad_norm": 0.1261017769575119,
"learning_rate": 1.6023166023166026e-05,
"loss": 0.0587,
"step": 83
},
{
"epoch": 0.4057971014492754,
"grad_norm": 0.1151406541466713,
"learning_rate": 1.6216216216216218e-05,
"loss": 0.0551,
"step": 84
},
{
"epoch": 0.4106280193236715,
"grad_norm": 0.10128024965524673,
"learning_rate": 1.640926640926641e-05,
"loss": 0.0511,
"step": 85
},
{
"epoch": 0.41545893719806765,
"grad_norm": 0.1041695848107338,
"learning_rate": 1.6602316602316603e-05,
"loss": 0.0676,
"step": 86
},
{
"epoch": 0.42028985507246375,
"grad_norm": 0.1372576802968979,
"learning_rate": 1.6795366795366796e-05,
"loss": 0.0656,
"step": 87
},
{
"epoch": 0.4251207729468599,
"grad_norm": 0.11239906400442123,
"learning_rate": 1.698841698841699e-05,
"loss": 0.0438,
"step": 88
},
{
"epoch": 0.42995169082125606,
"grad_norm": 0.10844457149505615,
"learning_rate": 1.718146718146718e-05,
"loss": 0.0567,
"step": 89
},
{
"epoch": 0.43478260869565216,
"grad_norm": 0.11423434317111969,
"learning_rate": 1.7374517374517377e-05,
"loss": 0.0669,
"step": 90
},
{
"epoch": 0.4396135265700483,
"grad_norm": 0.108824722468853,
"learning_rate": 1.756756756756757e-05,
"loss": 0.0582,
"step": 91
},
{
"epoch": 0.4444444444444444,
"grad_norm": 0.1085321456193924,
"learning_rate": 1.7760617760617762e-05,
"loss": 0.0518,
"step": 92
},
{
"epoch": 0.4492753623188406,
"grad_norm": 0.16060896217823029,
"learning_rate": 1.7953667953667955e-05,
"loss": 0.0853,
"step": 93
},
{
"epoch": 0.45410628019323673,
"grad_norm": 0.10322371125221252,
"learning_rate": 1.8146718146718147e-05,
"loss": 0.0434,
"step": 94
},
{
"epoch": 0.45893719806763283,
"grad_norm": 0.1057286411523819,
"learning_rate": 1.833976833976834e-05,
"loss": 0.0488,
"step": 95
},
{
"epoch": 0.463768115942029,
"grad_norm": 0.1063227504491806,
"learning_rate": 1.8532818532818533e-05,
"loss": 0.0521,
"step": 96
},
{
"epoch": 0.46859903381642515,
"grad_norm": 0.12127968668937683,
"learning_rate": 1.8725868725868725e-05,
"loss": 0.0548,
"step": 97
},
{
"epoch": 0.47342995169082125,
"grad_norm": 0.10702498257160187,
"learning_rate": 1.891891891891892e-05,
"loss": 0.0481,
"step": 98
},
{
"epoch": 0.4782608695652174,
"grad_norm": 0.10290008783340454,
"learning_rate": 1.9111969111969114e-05,
"loss": 0.0516,
"step": 99
},
{
"epoch": 0.4830917874396135,
"grad_norm": 0.10148600488901138,
"learning_rate": 1.9305019305019306e-05,
"loss": 0.0496,
"step": 100
},
{
"epoch": 0.4830917874396135,
"eval_loss": 0.036481305956840515,
"eval_runtime": 20.2995,
"eval_samples_per_second": 4.926,
"eval_steps_per_second": 0.148,
"step": 100
},
{
"epoch": 0.48792270531400966,
"grad_norm": 0.10877996683120728,
"learning_rate": 1.94980694980695e-05,
"loss": 0.0523,
"step": 101
},
{
"epoch": 0.4927536231884058,
"grad_norm": 0.11746326833963394,
"learning_rate": 1.969111969111969e-05,
"loss": 0.0448,
"step": 102
},
{
"epoch": 0.4975845410628019,
"grad_norm": 0.1287139356136322,
"learning_rate": 1.9884169884169884e-05,
"loss": 0.054,
"step": 103
},
{
"epoch": 0.5024154589371981,
"grad_norm": 0.10861372202634811,
"learning_rate": 2.0077220077220077e-05,
"loss": 0.0486,
"step": 104
},
{
"epoch": 0.5072463768115942,
"grad_norm": 0.10173708945512772,
"learning_rate": 2.0270270270270273e-05,
"loss": 0.0413,
"step": 105
},
{
"epoch": 0.5120772946859904,
"grad_norm": 0.09587957710027695,
"learning_rate": 2.0463320463320465e-05,
"loss": 0.0429,
"step": 106
},
{
"epoch": 0.5169082125603864,
"grad_norm": 0.10803103446960449,
"learning_rate": 2.0656370656370658e-05,
"loss": 0.0376,
"step": 107
},
{
"epoch": 0.5217391304347826,
"grad_norm": 0.12559597194194794,
"learning_rate": 2.084942084942085e-05,
"loss": 0.0435,
"step": 108
},
{
"epoch": 0.5265700483091788,
"grad_norm": 0.12464939057826996,
"learning_rate": 2.1042471042471043e-05,
"loss": 0.0527,
"step": 109
},
{
"epoch": 0.5314009661835749,
"grad_norm": 0.11706342548131943,
"learning_rate": 2.1235521235521236e-05,
"loss": 0.0451,
"step": 110
},
{
"epoch": 0.5362318840579711,
"grad_norm": 0.10209579020738602,
"learning_rate": 2.1428571428571428e-05,
"loss": 0.0405,
"step": 111
},
{
"epoch": 0.5410628019323671,
"grad_norm": 0.12944906949996948,
"learning_rate": 2.1621621621621624e-05,
"loss": 0.0536,
"step": 112
},
{
"epoch": 0.5458937198067633,
"grad_norm": 0.10440570116043091,
"learning_rate": 2.1814671814671817e-05,
"loss": 0.0353,
"step": 113
},
{
"epoch": 0.5507246376811594,
"grad_norm": 0.23940707743167877,
"learning_rate": 2.200772200772201e-05,
"loss": 0.0507,
"step": 114
},
{
"epoch": 0.5555555555555556,
"grad_norm": 0.11977086216211319,
"learning_rate": 2.2200772200772202e-05,
"loss": 0.0538,
"step": 115
},
{
"epoch": 0.5603864734299517,
"grad_norm": 0.15066705644130707,
"learning_rate": 2.2393822393822394e-05,
"loss": 0.0525,
"step": 116
},
{
"epoch": 0.5652173913043478,
"grad_norm": 0.12231211364269257,
"learning_rate": 2.2586872586872587e-05,
"loss": 0.0441,
"step": 117
},
{
"epoch": 0.5700483091787439,
"grad_norm": 0.09432463347911835,
"learning_rate": 2.277992277992278e-05,
"loss": 0.0345,
"step": 118
},
{
"epoch": 0.5748792270531401,
"grad_norm": 0.15186284482479095,
"learning_rate": 2.2972972972972976e-05,
"loss": 0.0495,
"step": 119
},
{
"epoch": 0.5797101449275363,
"grad_norm": 0.10990628600120544,
"learning_rate": 2.3166023166023168e-05,
"loss": 0.0387,
"step": 120
},
{
"epoch": 0.5845410628019324,
"grad_norm": 0.12975062429904938,
"learning_rate": 2.335907335907336e-05,
"loss": 0.0416,
"step": 121
},
{
"epoch": 0.5893719806763285,
"grad_norm": 0.11566514521837234,
"learning_rate": 2.3552123552123553e-05,
"loss": 0.0386,
"step": 122
},
{
"epoch": 0.5942028985507246,
"grad_norm": 0.11288753151893616,
"learning_rate": 2.3745173745173746e-05,
"loss": 0.0435,
"step": 123
},
{
"epoch": 0.5990338164251208,
"grad_norm": 0.12189905345439911,
"learning_rate": 2.393822393822394e-05,
"loss": 0.0452,
"step": 124
},
{
"epoch": 0.6038647342995169,
"grad_norm": 0.11823577433824539,
"learning_rate": 2.413127413127413e-05,
"loss": 0.046,
"step": 125
},
{
"epoch": 0.6038647342995169,
"eval_loss": 0.03164202719926834,
"eval_runtime": 20.309,
"eval_samples_per_second": 4.924,
"eval_steps_per_second": 0.148,
"step": 125
},
{
"epoch": 0.6086956521739131,
"grad_norm": 0.11446941643953323,
"learning_rate": 2.4324324324324327e-05,
"loss": 0.0399,
"step": 126
},
{
"epoch": 0.6135265700483091,
"grad_norm": 0.10811971127986908,
"learning_rate": 2.451737451737452e-05,
"loss": 0.0269,
"step": 127
},
{
"epoch": 0.6183574879227053,
"grad_norm": 0.10582166165113449,
"learning_rate": 2.4710424710424712e-05,
"loss": 0.0408,
"step": 128
},
{
"epoch": 0.6231884057971014,
"grad_norm": 0.10825195163488388,
"learning_rate": 2.4903474903474905e-05,
"loss": 0.0439,
"step": 129
},
{
"epoch": 0.6280193236714976,
"grad_norm": 0.11372285336256027,
"learning_rate": 2.5096525096525097e-05,
"loss": 0.0345,
"step": 130
},
{
"epoch": 0.6328502415458938,
"grad_norm": 0.12226233631372452,
"learning_rate": 2.528957528957529e-05,
"loss": 0.045,
"step": 131
},
{
"epoch": 0.6376811594202898,
"grad_norm": 0.125160351395607,
"learning_rate": 2.5482625482625483e-05,
"loss": 0.0508,
"step": 132
},
{
"epoch": 0.642512077294686,
"grad_norm": 0.10091613233089447,
"learning_rate": 2.5675675675675675e-05,
"loss": 0.0361,
"step": 133
},
{
"epoch": 0.6473429951690821,
"grad_norm": 0.09511641412973404,
"learning_rate": 2.5868725868725868e-05,
"loss": 0.0327,
"step": 134
},
{
"epoch": 0.6521739130434783,
"grad_norm": 0.11392970383167267,
"learning_rate": 2.606177606177606e-05,
"loss": 0.0427,
"step": 135
},
{
"epoch": 0.6570048309178744,
"grad_norm": 0.11419506371021271,
"learning_rate": 2.6254826254826253e-05,
"loss": 0.0395,
"step": 136
},
{
"epoch": 0.6618357487922706,
"grad_norm": 0.11490583419799805,
"learning_rate": 2.6447876447876452e-05,
"loss": 0.0297,
"step": 137
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.12591791152954102,
"learning_rate": 2.6640926640926645e-05,
"loss": 0.0346,
"step": 138
},
{
"epoch": 0.6714975845410628,
"grad_norm": 0.13922667503356934,
"learning_rate": 2.6833976833976838e-05,
"loss": 0.0407,
"step": 139
},
{
"epoch": 0.6763285024154589,
"grad_norm": 0.12055017054080963,
"learning_rate": 2.702702702702703e-05,
"loss": 0.0307,
"step": 140
},
{
"epoch": 0.6811594202898551,
"grad_norm": 0.10987816751003265,
"learning_rate": 2.7220077220077223e-05,
"loss": 0.0261,
"step": 141
},
{
"epoch": 0.6859903381642513,
"grad_norm": 0.14209432899951935,
"learning_rate": 2.7413127413127415e-05,
"loss": 0.0456,
"step": 142
},
{
"epoch": 0.6908212560386473,
"grad_norm": 0.11652625352144241,
"learning_rate": 2.7606177606177608e-05,
"loss": 0.0358,
"step": 143
},
{
"epoch": 0.6956521739130435,
"grad_norm": 0.12266818434000015,
"learning_rate": 2.77992277992278e-05,
"loss": 0.0378,
"step": 144
},
{
"epoch": 0.7004830917874396,
"grad_norm": 0.11468692868947983,
"learning_rate": 2.7992277992277993e-05,
"loss": 0.0346,
"step": 145
},
{
"epoch": 0.7053140096618358,
"grad_norm": 0.1078886017203331,
"learning_rate": 2.8185328185328186e-05,
"loss": 0.034,
"step": 146
},
{
"epoch": 0.7101449275362319,
"grad_norm": 0.12835174798965454,
"learning_rate": 2.8378378378378378e-05,
"loss": 0.0444,
"step": 147
},
{
"epoch": 0.714975845410628,
"grad_norm": 0.12754280865192413,
"learning_rate": 2.857142857142857e-05,
"loss": 0.0364,
"step": 148
},
{
"epoch": 0.7198067632850241,
"grad_norm": 0.10432615876197815,
"learning_rate": 2.8764478764478763e-05,
"loss": 0.0275,
"step": 149
},
{
"epoch": 0.7246376811594203,
"grad_norm": 0.1269528716802597,
"learning_rate": 2.8957528957528956e-05,
"loss": 0.0368,
"step": 150
},
{
"epoch": 0.7246376811594203,
"eval_loss": 0.02661709673702717,
"eval_runtime": 20.3433,
"eval_samples_per_second": 4.916,
"eval_steps_per_second": 0.147,
"step": 150
},
{
"epoch": 0.7294685990338164,
"grad_norm": 0.10792283713817596,
"learning_rate": 2.915057915057915e-05,
"loss": 0.0349,
"step": 151
},
{
"epoch": 0.7342995169082126,
"grad_norm": 0.11990821361541748,
"learning_rate": 2.9343629343629348e-05,
"loss": 0.0401,
"step": 152
},
{
"epoch": 0.7391304347826086,
"grad_norm": 0.12300258129835129,
"learning_rate": 2.953667953667954e-05,
"loss": 0.0332,
"step": 153
},
{
"epoch": 0.7439613526570048,
"grad_norm": 0.1269589215517044,
"learning_rate": 2.9729729729729733e-05,
"loss": 0.0348,
"step": 154
},
{
"epoch": 0.748792270531401,
"grad_norm": 0.1516416221857071,
"learning_rate": 2.9922779922779926e-05,
"loss": 0.0425,
"step": 155
},
{
"epoch": 0.7536231884057971,
"grad_norm": 0.11869463324546814,
"learning_rate": 3.011583011583012e-05,
"loss": 0.0252,
"step": 156
},
{
"epoch": 0.7584541062801933,
"grad_norm": 0.11784904450178146,
"learning_rate": 3.030888030888031e-05,
"loss": 0.0295,
"step": 157
},
{
"epoch": 0.7632850241545893,
"grad_norm": 0.14304758608341217,
"learning_rate": 3.0501930501930504e-05,
"loss": 0.0348,
"step": 158
},
{
"epoch": 0.7681159420289855,
"grad_norm": 0.13934855163097382,
"learning_rate": 3.0694980694980696e-05,
"loss": 0.0314,
"step": 159
},
{
"epoch": 0.7729468599033816,
"grad_norm": 0.15357963740825653,
"learning_rate": 3.088803088803089e-05,
"loss": 0.037,
"step": 160
},
{
"epoch": 0.7777777777777778,
"grad_norm": 0.17761190235614777,
"learning_rate": 3.108108108108108e-05,
"loss": 0.0331,
"step": 161
},
{
"epoch": 0.782608695652174,
"grad_norm": 0.11713641881942749,
"learning_rate": 3.1274131274131274e-05,
"loss": 0.0321,
"step": 162
},
{
"epoch": 0.7874396135265701,
"grad_norm": 0.14508825540542603,
"learning_rate": 3.1467181467181466e-05,
"loss": 0.031,
"step": 163
},
{
"epoch": 0.7922705314009661,
"grad_norm": 0.1814957559108734,
"learning_rate": 3.166023166023166e-05,
"loss": 0.0345,
"step": 164
},
{
"epoch": 0.7971014492753623,
"grad_norm": 0.10010110586881638,
"learning_rate": 3.185328185328185e-05,
"loss": 0.0426,
"step": 165
},
{
"epoch": 0.8019323671497585,
"grad_norm": 0.1516270786523819,
"learning_rate": 3.204633204633205e-05,
"loss": 0.0434,
"step": 166
},
{
"epoch": 0.8067632850241546,
"grad_norm": 0.131612628698349,
"learning_rate": 3.2239382239382244e-05,
"loss": 0.0354,
"step": 167
},
{
"epoch": 0.8115942028985508,
"grad_norm": 0.14485694468021393,
"learning_rate": 3.2432432432432436e-05,
"loss": 0.0322,
"step": 168
},
{
"epoch": 0.8164251207729468,
"grad_norm": 0.18271975219249725,
"learning_rate": 3.262548262548263e-05,
"loss": 0.0338,
"step": 169
},
{
"epoch": 0.821256038647343,
"grad_norm": 0.11214082688093185,
"learning_rate": 3.281853281853282e-05,
"loss": 0.0345,
"step": 170
},
{
"epoch": 0.8260869565217391,
"grad_norm": 0.1456298530101776,
"learning_rate": 3.3011583011583014e-05,
"loss": 0.0355,
"step": 171
},
{
"epoch": 0.8309178743961353,
"grad_norm": 0.17988459765911102,
"learning_rate": 3.3204633204633207e-05,
"loss": 0.0314,
"step": 172
},
{
"epoch": 0.8357487922705314,
"grad_norm": 0.1143067479133606,
"learning_rate": 3.33976833976834e-05,
"loss": 0.035,
"step": 173
},
{
"epoch": 0.8405797101449275,
"grad_norm": 0.18110711872577667,
"learning_rate": 3.359073359073359e-05,
"loss": 0.0296,
"step": 174
},
{
"epoch": 0.8454106280193237,
"grad_norm": 0.13397149741649628,
"learning_rate": 3.3783783783783784e-05,
"loss": 0.0283,
"step": 175
},
{
"epoch": 0.8454106280193237,
"eval_loss": 0.023197688162326813,
"eval_runtime": 20.5122,
"eval_samples_per_second": 4.875,
"eval_steps_per_second": 0.146,
"step": 175
},
{
"epoch": 0.8502415458937198,
"grad_norm": 0.14032766222953796,
"learning_rate": 3.397683397683398e-05,
"loss": 0.0397,
"step": 176
},
{
"epoch": 0.855072463768116,
"grad_norm": 0.12052756547927856,
"learning_rate": 3.416988416988417e-05,
"loss": 0.0269,
"step": 177
},
{
"epoch": 0.8599033816425121,
"grad_norm": 0.14283765852451324,
"learning_rate": 3.436293436293436e-05,
"loss": 0.0328,
"step": 178
},
{
"epoch": 0.8647342995169082,
"grad_norm": 0.12751182913780212,
"learning_rate": 3.4555984555984555e-05,
"loss": 0.0284,
"step": 179
},
{
"epoch": 0.8695652173913043,
"grad_norm": 0.09834396094083786,
"learning_rate": 3.4749034749034754e-05,
"loss": 0.0232,
"step": 180
},
{
"epoch": 0.8743961352657005,
"grad_norm": 0.17338038980960846,
"learning_rate": 3.4942084942084947e-05,
"loss": 0.0354,
"step": 181
},
{
"epoch": 0.8792270531400966,
"grad_norm": 0.1350838541984558,
"learning_rate": 3.513513513513514e-05,
"loss": 0.0354,
"step": 182
},
{
"epoch": 0.8840579710144928,
"grad_norm": 0.12952224910259247,
"learning_rate": 3.532818532818533e-05,
"loss": 0.0294,
"step": 183
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.1229347437620163,
"learning_rate": 3.5521235521235524e-05,
"loss": 0.0303,
"step": 184
},
{
"epoch": 0.893719806763285,
"grad_norm": 0.11522738635540009,
"learning_rate": 3.571428571428572e-05,
"loss": 0.0296,
"step": 185
},
{
"epoch": 0.8985507246376812,
"grad_norm": 0.1309077888727188,
"learning_rate": 3.590733590733591e-05,
"loss": 0.0338,
"step": 186
},
{
"epoch": 0.9033816425120773,
"grad_norm": 0.1434311419725418,
"learning_rate": 3.61003861003861e-05,
"loss": 0.0375,
"step": 187
},
{
"epoch": 0.9082125603864735,
"grad_norm": 0.13514019548892975,
"learning_rate": 3.6293436293436295e-05,
"loss": 0.0344,
"step": 188
},
{
"epoch": 0.9130434782608695,
"grad_norm": 0.13007153570652008,
"learning_rate": 3.648648648648649e-05,
"loss": 0.0293,
"step": 189
},
{
"epoch": 0.9178743961352657,
"grad_norm": 0.10133199393749237,
"learning_rate": 3.667953667953668e-05,
"loss": 0.0196,
"step": 190
},
{
"epoch": 0.9227053140096618,
"grad_norm": 0.15782149136066437,
"learning_rate": 3.687258687258687e-05,
"loss": 0.0294,
"step": 191
},
{
"epoch": 0.927536231884058,
"grad_norm": 0.10956969112157822,
"learning_rate": 3.7065637065637065e-05,
"loss": 0.0274,
"step": 192
},
{
"epoch": 0.9323671497584541,
"grad_norm": 0.1855483055114746,
"learning_rate": 3.725868725868726e-05,
"loss": 0.0326,
"step": 193
},
{
"epoch": 0.9371980676328503,
"grad_norm": 0.14759743213653564,
"learning_rate": 3.745173745173745e-05,
"loss": 0.0281,
"step": 194
},
{
"epoch": 0.9420289855072463,
"grad_norm": 0.19224123656749725,
"learning_rate": 3.764478764478765e-05,
"loss": 0.0235,
"step": 195
},
{
"epoch": 0.9468599033816425,
"grad_norm": 0.12383207678794861,
"learning_rate": 3.783783783783784e-05,
"loss": 0.0227,
"step": 196
},
{
"epoch": 0.9516908212560387,
"grad_norm": 0.13436304032802582,
"learning_rate": 3.8030888030888035e-05,
"loss": 0.0248,
"step": 197
},
{
"epoch": 0.9565217391304348,
"grad_norm": 0.11358322203159332,
"learning_rate": 3.822393822393823e-05,
"loss": 0.0272,
"step": 198
},
{
"epoch": 0.961352657004831,
"grad_norm": 0.09752532094717026,
"learning_rate": 3.841698841698842e-05,
"loss": 0.0264,
"step": 199
},
{
"epoch": 0.966183574879227,
"grad_norm": 0.11581085622310638,
"learning_rate": 3.861003861003861e-05,
"loss": 0.0237,
"step": 200
},
{
"epoch": 0.966183574879227,
"eval_loss": 0.021242819726467133,
"eval_runtime": 20.3025,
"eval_samples_per_second": 4.925,
"eval_steps_per_second": 0.148,
"step": 200
},
{
"epoch": 0.9710144927536232,
"grad_norm": 0.11759760975837708,
"learning_rate": 3.8803088803088805e-05,
"loss": 0.0357,
"step": 201
},
{
"epoch": 0.9758454106280193,
"grad_norm": 0.1146010234951973,
"learning_rate": 3.8996138996139e-05,
"loss": 0.0293,
"step": 202
},
{
"epoch": 0.9806763285024155,
"grad_norm": 0.08743493258953094,
"learning_rate": 3.918918918918919e-05,
"loss": 0.0233,
"step": 203
},
{
"epoch": 0.9855072463768116,
"grad_norm": 0.09883563965559006,
"learning_rate": 3.938223938223938e-05,
"loss": 0.0277,
"step": 204
},
{
"epoch": 0.9903381642512077,
"grad_norm": 0.1223803460597992,
"learning_rate": 3.9575289575289576e-05,
"loss": 0.033,
"step": 205
},
{
"epoch": 0.9951690821256038,
"grad_norm": 0.10520818829536438,
"learning_rate": 3.976833976833977e-05,
"loss": 0.0285,
"step": 206
},
{
"epoch": 1.0,
"grad_norm": 0.19492344558238983,
"learning_rate": 3.996138996138996e-05,
"loss": 0.0287,
"step": 207
},
{
"epoch": 1.0048309178743962,
"grad_norm": 0.1388760209083557,
"learning_rate": 4.015444015444015e-05,
"loss": 0.0276,
"step": 208
},
{
"epoch": 1.0096618357487923,
"grad_norm": 0.10627097636461258,
"learning_rate": 4.034749034749035e-05,
"loss": 0.0263,
"step": 209
},
{
"epoch": 1.0144927536231885,
"grad_norm": 0.13346248865127563,
"learning_rate": 4.0540540540540545e-05,
"loss": 0.0222,
"step": 210
},
{
"epoch": 1.0193236714975846,
"grad_norm": 0.14749185740947723,
"learning_rate": 4.073359073359074e-05,
"loss": 0.0313,
"step": 211
},
{
"epoch": 1.0241545893719808,
"grad_norm": 0.14564776420593262,
"learning_rate": 4.092664092664093e-05,
"loss": 0.0282,
"step": 212
},
{
"epoch": 1.0289855072463767,
"grad_norm": 0.12179957330226898,
"learning_rate": 4.111969111969112e-05,
"loss": 0.0223,
"step": 213
},
{
"epoch": 1.0338164251207729,
"grad_norm": 0.12672066688537598,
"learning_rate": 4.1312741312741316e-05,
"loss": 0.0264,
"step": 214
},
{
"epoch": 1.038647342995169,
"grad_norm": 0.13626433908939362,
"learning_rate": 4.150579150579151e-05,
"loss": 0.0299,
"step": 215
},
{
"epoch": 1.0434782608695652,
"grad_norm": 0.09698859602212906,
"learning_rate": 4.16988416988417e-05,
"loss": 0.0268,
"step": 216
},
{
"epoch": 1.0483091787439613,
"grad_norm": 0.16582411527633667,
"learning_rate": 4.189189189189189e-05,
"loss": 0.0251,
"step": 217
},
{
"epoch": 1.0531400966183575,
"grad_norm": 0.12611252069473267,
"learning_rate": 4.2084942084942086e-05,
"loss": 0.0289,
"step": 218
},
{
"epoch": 1.0579710144927537,
"grad_norm": 0.12253251671791077,
"learning_rate": 4.227799227799228e-05,
"loss": 0.028,
"step": 219
},
{
"epoch": 1.0628019323671498,
"grad_norm": 0.12088047713041306,
"learning_rate": 4.247104247104247e-05,
"loss": 0.0296,
"step": 220
},
{
"epoch": 1.067632850241546,
"grad_norm": 0.13391311466693878,
"learning_rate": 4.2664092664092664e-05,
"loss": 0.0229,
"step": 221
},
{
"epoch": 1.0724637681159421,
"grad_norm": 0.12342043966054916,
"learning_rate": 4.2857142857142856e-05,
"loss": 0.0209,
"step": 222
},
{
"epoch": 1.077294685990338,
"grad_norm": 0.117287777364254,
"learning_rate": 4.305019305019305e-05,
"loss": 0.0255,
"step": 223
},
{
"epoch": 1.0821256038647342,
"grad_norm": 0.15881749987602234,
"learning_rate": 4.324324324324325e-05,
"loss": 0.0191,
"step": 224
},
{
"epoch": 1.0869565217391304,
"grad_norm": 0.14633190631866455,
"learning_rate": 4.343629343629344e-05,
"loss": 0.0234,
"step": 225
},
{
"epoch": 1.0869565217391304,
"eval_loss": 0.019356004893779755,
"eval_runtime": 20.3141,
"eval_samples_per_second": 4.923,
"eval_steps_per_second": 0.148,
"step": 225
},
{
"epoch": 1.0917874396135265,
"grad_norm": 0.14119146764278412,
"learning_rate": 4.3629343629343633e-05,
"loss": 0.0262,
"step": 226
},
{
"epoch": 1.0966183574879227,
"grad_norm": 0.15874817967414856,
"learning_rate": 4.3822393822393826e-05,
"loss": 0.0219,
"step": 227
},
{
"epoch": 1.1014492753623188,
"grad_norm": 0.10163708031177521,
"learning_rate": 4.401544401544402e-05,
"loss": 0.0239,
"step": 228
},
{
"epoch": 1.106280193236715,
"grad_norm": 0.1453448086977005,
"learning_rate": 4.420849420849421e-05,
"loss": 0.03,
"step": 229
},
{
"epoch": 1.1111111111111112,
"grad_norm": 0.11025876551866531,
"learning_rate": 4.4401544401544404e-05,
"loss": 0.0256,
"step": 230
},
{
"epoch": 1.1159420289855073,
"grad_norm": 0.08861984312534332,
"learning_rate": 4.4594594594594596e-05,
"loss": 0.0271,
"step": 231
},
{
"epoch": 1.1207729468599035,
"grad_norm": 0.08897382766008377,
"learning_rate": 4.478764478764479e-05,
"loss": 0.024,
"step": 232
},
{
"epoch": 1.1256038647342996,
"grad_norm": 0.10531286150217056,
"learning_rate": 4.498069498069498e-05,
"loss": 0.0273,
"step": 233
},
{
"epoch": 1.1304347826086956,
"grad_norm": 0.10693822056055069,
"learning_rate": 4.5173745173745174e-05,
"loss": 0.024,
"step": 234
},
{
"epoch": 1.1352657004830917,
"grad_norm": 0.08394042402505875,
"learning_rate": 4.536679536679537e-05,
"loss": 0.0177,
"step": 235
},
{
"epoch": 1.1400966183574879,
"grad_norm": 0.13688276708126068,
"learning_rate": 4.555984555984556e-05,
"loss": 0.0292,
"step": 236
},
{
"epoch": 1.144927536231884,
"grad_norm": 0.11339515447616577,
"learning_rate": 4.575289575289575e-05,
"loss": 0.0171,
"step": 237
},
{
"epoch": 1.1497584541062802,
"grad_norm": 0.12284926325082779,
"learning_rate": 4.594594594594595e-05,
"loss": 0.0211,
"step": 238
},
{
"epoch": 1.1545893719806763,
"grad_norm": 0.16894559562206268,
"learning_rate": 4.6138996138996144e-05,
"loss": 0.0369,
"step": 239
},
{
"epoch": 1.1594202898550725,
"grad_norm": 0.09520669281482697,
"learning_rate": 4.6332046332046336e-05,
"loss": 0.0211,
"step": 240
},
{
"epoch": 1.1642512077294687,
"grad_norm": 0.13406948745250702,
"learning_rate": 4.652509652509653e-05,
"loss": 0.0232,
"step": 241
},
{
"epoch": 1.1690821256038648,
"grad_norm": 0.09464199095964432,
"learning_rate": 4.671814671814672e-05,
"loss": 0.0267,
"step": 242
},
{
"epoch": 1.1739130434782608,
"grad_norm": 0.10455941408872604,
"learning_rate": 4.6911196911196914e-05,
"loss": 0.0199,
"step": 243
},
{
"epoch": 1.178743961352657,
"grad_norm": 0.13815800845623016,
"learning_rate": 4.710424710424711e-05,
"loss": 0.0224,
"step": 244
},
{
"epoch": 1.183574879227053,
"grad_norm": 0.1540479212999344,
"learning_rate": 4.72972972972973e-05,
"loss": 0.0321,
"step": 245
},
{
"epoch": 1.1884057971014492,
"grad_norm": 0.129620760679245,
"learning_rate": 4.749034749034749e-05,
"loss": 0.0184,
"step": 246
},
{
"epoch": 1.1932367149758454,
"grad_norm": 0.14373724162578583,
"learning_rate": 4.7683397683397685e-05,
"loss": 0.0279,
"step": 247
},
{
"epoch": 1.1980676328502415,
"grad_norm": 0.09800299257040024,
"learning_rate": 4.787644787644788e-05,
"loss": 0.0149,
"step": 248
},
{
"epoch": 1.2028985507246377,
"grad_norm": 0.11203493922948837,
"learning_rate": 4.806949806949807e-05,
"loss": 0.026,
"step": 249
},
{
"epoch": 1.2077294685990339,
"grad_norm": 0.12114997208118439,
"learning_rate": 4.826254826254826e-05,
"loss": 0.0232,
"step": 250
},
{
"epoch": 1.2077294685990339,
"eval_loss": 0.01758512854576111,
"eval_runtime": 20.3081,
"eval_samples_per_second": 4.924,
"eval_steps_per_second": 0.148,
"step": 250
},
{
"epoch": 1.21256038647343,
"grad_norm": 0.08669663965702057,
"learning_rate": 4.8455598455598455e-05,
"loss": 0.02,
"step": 251
},
{
"epoch": 1.2173913043478262,
"grad_norm": 0.08553537726402283,
"learning_rate": 4.8648648648648654e-05,
"loss": 0.0129,
"step": 252
},
{
"epoch": 1.2222222222222223,
"grad_norm": 0.0841841846704483,
"learning_rate": 4.884169884169885e-05,
"loss": 0.0191,
"step": 253
},
{
"epoch": 1.2270531400966185,
"grad_norm": 0.10962934792041779,
"learning_rate": 4.903474903474904e-05,
"loss": 0.0218,
"step": 254
},
{
"epoch": 1.2318840579710144,
"grad_norm": 0.0924534946680069,
"learning_rate": 4.922779922779923e-05,
"loss": 0.0284,
"step": 255
},
{
"epoch": 1.2367149758454106,
"grad_norm": 0.10882215946912766,
"learning_rate": 4.9420849420849425e-05,
"loss": 0.0215,
"step": 256
},
{
"epoch": 1.2415458937198067,
"grad_norm": 0.11583147943019867,
"learning_rate": 4.961389961389962e-05,
"loss": 0.0235,
"step": 257
},
{
"epoch": 1.2463768115942029,
"grad_norm": 0.10939005017280579,
"learning_rate": 4.980694980694981e-05,
"loss": 0.0291,
"step": 258
},
{
"epoch": 1.251207729468599,
"grad_norm": 0.10021679848432541,
"learning_rate": 5e-05,
"loss": 0.022,
"step": 259
},
{
"epoch": 1.2560386473429952,
"grad_norm": 0.10946795344352722,
"learning_rate": 5.0193050193050195e-05,
"loss": 0.0224,
"step": 260
},
{
"epoch": 1.2608695652173914,
"grad_norm": 0.10062968730926514,
"learning_rate": 5.038610038610039e-05,
"loss": 0.0246,
"step": 261
},
{
"epoch": 1.2657004830917875,
"grad_norm": 0.09000770002603531,
"learning_rate": 5.057915057915058e-05,
"loss": 0.0214,
"step": 262
},
{
"epoch": 1.2705314009661834,
"grad_norm": 0.09875966608524323,
"learning_rate": 5.077220077220077e-05,
"loss": 0.0224,
"step": 263
},
{
"epoch": 1.2753623188405796,
"grad_norm": 0.14588375389575958,
"learning_rate": 5.0965250965250965e-05,
"loss": 0.0233,
"step": 264
},
{
"epoch": 1.2801932367149758,
"grad_norm": 0.08824263513088226,
"learning_rate": 5.115830115830116e-05,
"loss": 0.0183,
"step": 265
},
{
"epoch": 1.285024154589372,
"grad_norm": 0.1460726410150528,
"learning_rate": 5.135135135135135e-05,
"loss": 0.0296,
"step": 266
},
{
"epoch": 1.289855072463768,
"grad_norm": 0.10470607876777649,
"learning_rate": 5.154440154440154e-05,
"loss": 0.025,
"step": 267
},
{
"epoch": 1.2946859903381642,
"grad_norm": 0.1622585654258728,
"learning_rate": 5.1737451737451736e-05,
"loss": 0.0241,
"step": 268
},
{
"epoch": 1.2995169082125604,
"grad_norm": 0.11767762899398804,
"learning_rate": 5.193050193050193e-05,
"loss": 0.0272,
"step": 269
},
{
"epoch": 1.3043478260869565,
"grad_norm": 0.154914990067482,
"learning_rate": 5.212355212355212e-05,
"loss": 0.0268,
"step": 270
},
{
"epoch": 1.3091787439613527,
"grad_norm": 0.1314283013343811,
"learning_rate": 5.2316602316602313e-05,
"loss": 0.0232,
"step": 271
},
{
"epoch": 1.3140096618357489,
"grad_norm": 0.10727723687887192,
"learning_rate": 5.2509652509652506e-05,
"loss": 0.0241,
"step": 272
},
{
"epoch": 1.318840579710145,
"grad_norm": 0.13747063279151917,
"learning_rate": 5.27027027027027e-05,
"loss": 0.0239,
"step": 273
},
{
"epoch": 1.3236714975845412,
"grad_norm": 0.08335089683532715,
"learning_rate": 5.2895752895752905e-05,
"loss": 0.0182,
"step": 274
},
{
"epoch": 1.3285024154589373,
"grad_norm": 0.12808988988399506,
"learning_rate": 5.30888030888031e-05,
"loss": 0.0307,
"step": 275
},
{
"epoch": 1.3285024154589373,
"eval_loss": 0.01781173050403595,
"eval_runtime": 20.3141,
"eval_samples_per_second": 4.923,
"eval_steps_per_second": 0.148,
"step": 275
},
{
"epoch": 1.3333333333333333,
"grad_norm": 0.10613222420215607,
"learning_rate": 5.328185328185329e-05,
"loss": 0.0228,
"step": 276
},
{
"epoch": 1.3381642512077294,
"grad_norm": 0.09993978589773178,
"learning_rate": 5.347490347490348e-05,
"loss": 0.0239,
"step": 277
},
{
"epoch": 1.3429951690821256,
"grad_norm": 0.08622147142887115,
"learning_rate": 5.3667953667953675e-05,
"loss": 0.0192,
"step": 278
},
{
"epoch": 1.3478260869565217,
"grad_norm": 0.08509330451488495,
"learning_rate": 5.386100386100387e-05,
"loss": 0.0153,
"step": 279
},
{
"epoch": 1.3526570048309179,
"grad_norm": 0.08358469605445862,
"learning_rate": 5.405405405405406e-05,
"loss": 0.0168,
"step": 280
},
{
"epoch": 1.357487922705314,
"grad_norm": 0.09522559493780136,
"learning_rate": 5.424710424710425e-05,
"loss": 0.0233,
"step": 281
},
{
"epoch": 1.3623188405797102,
"grad_norm": 0.07689143717288971,
"learning_rate": 5.4440154440154445e-05,
"loss": 0.0181,
"step": 282
},
{
"epoch": 1.3671497584541064,
"grad_norm": 0.08445203304290771,
"learning_rate": 5.463320463320464e-05,
"loss": 0.0176,
"step": 283
},
{
"epoch": 1.3719806763285023,
"grad_norm": 0.13829359412193298,
"learning_rate": 5.482625482625483e-05,
"loss": 0.0212,
"step": 284
},
{
"epoch": 1.3768115942028984,
"grad_norm": 0.09589879214763641,
"learning_rate": 5.501930501930502e-05,
"loss": 0.0195,
"step": 285
},
{
"epoch": 1.3816425120772946,
"grad_norm": 0.08803611993789673,
"learning_rate": 5.5212355212355216e-05,
"loss": 0.0194,
"step": 286
},
{
"epoch": 1.3864734299516908,
"grad_norm": 0.08383037894964218,
"learning_rate": 5.540540540540541e-05,
"loss": 0.0198,
"step": 287
},
{
"epoch": 1.391304347826087,
"grad_norm": 0.1102117970585823,
"learning_rate": 5.55984555984556e-05,
"loss": 0.0246,
"step": 288
},
{
"epoch": 1.396135265700483,
"grad_norm": 0.09469425678253174,
"learning_rate": 5.5791505791505794e-05,
"loss": 0.0266,
"step": 289
},
{
"epoch": 1.4009661835748792,
"grad_norm": 0.10489413887262344,
"learning_rate": 5.5984555984555986e-05,
"loss": 0.0319,
"step": 290
},
{
"epoch": 1.4057971014492754,
"grad_norm": 0.0961574912071228,
"learning_rate": 5.617760617760618e-05,
"loss": 0.0171,
"step": 291
},
{
"epoch": 1.4106280193236715,
"grad_norm": 0.09252733737230301,
"learning_rate": 5.637065637065637e-05,
"loss": 0.0222,
"step": 292
},
{
"epoch": 1.4154589371980677,
"grad_norm": 0.08882331103086472,
"learning_rate": 5.6563706563706564e-05,
"loss": 0.0233,
"step": 293
},
{
"epoch": 1.4202898550724639,
"grad_norm": 0.0826525092124939,
"learning_rate": 5.6756756756756757e-05,
"loss": 0.0184,
"step": 294
},
{
"epoch": 1.42512077294686,
"grad_norm": 0.06665649265050888,
"learning_rate": 5.694980694980695e-05,
"loss": 0.0154,
"step": 295
},
{
"epoch": 1.4299516908212562,
"grad_norm": 0.11430980265140533,
"learning_rate": 5.714285714285714e-05,
"loss": 0.0182,
"step": 296
},
{
"epoch": 1.434782608695652,
"grad_norm": 0.10717899352312088,
"learning_rate": 5.7335907335907334e-05,
"loss": 0.0189,
"step": 297
},
{
"epoch": 1.4396135265700483,
"grad_norm": 0.11982893198728561,
"learning_rate": 5.752895752895753e-05,
"loss": 0.0186,
"step": 298
},
{
"epoch": 1.4444444444444444,
"grad_norm": 0.10576062649488449,
"learning_rate": 5.772200772200772e-05,
"loss": 0.019,
"step": 299
},
{
"epoch": 1.4492753623188406,
"grad_norm": 0.12444712966680527,
"learning_rate": 5.791505791505791e-05,
"loss": 0.0228,
"step": 300
},
{
"epoch": 1.4492753623188406,
"eval_loss": 0.014667052775621414,
"eval_runtime": 20.3168,
"eval_samples_per_second": 4.922,
"eval_steps_per_second": 0.148,
"step": 300
},
{
"epoch": 1.4541062801932367,
"grad_norm": 0.08958641439676285,
"learning_rate": 5.8108108108108105e-05,
"loss": 0.0201,
"step": 301
},
{
"epoch": 1.458937198067633,
"grad_norm": 0.10375913977622986,
"learning_rate": 5.83011583011583e-05,
"loss": 0.0262,
"step": 302
},
{
"epoch": 1.463768115942029,
"grad_norm": 0.09301627427339554,
"learning_rate": 5.8494208494208503e-05,
"loss": 0.0208,
"step": 303
},
{
"epoch": 1.4685990338164252,
"grad_norm": 0.08783616125583649,
"learning_rate": 5.8687258687258696e-05,
"loss": 0.0248,
"step": 304
},
{
"epoch": 1.4734299516908211,
"grad_norm": 0.07414194941520691,
"learning_rate": 5.888030888030889e-05,
"loss": 0.019,
"step": 305
},
{
"epoch": 1.4782608695652173,
"grad_norm": 0.07132302969694138,
"learning_rate": 5.907335907335908e-05,
"loss": 0.0173,
"step": 306
},
{
"epoch": 1.4830917874396135,
"grad_norm": 0.09330809116363525,
"learning_rate": 5.9266409266409274e-05,
"loss": 0.0228,
"step": 307
},
{
"epoch": 1.4879227053140096,
"grad_norm": 0.06476512551307678,
"learning_rate": 5.9459459459459466e-05,
"loss": 0.0178,
"step": 308
},
{
"epoch": 1.4927536231884058,
"grad_norm": 0.08418332040309906,
"learning_rate": 5.965250965250966e-05,
"loss": 0.0176,
"step": 309
},
{
"epoch": 1.497584541062802,
"grad_norm": 0.0991380512714386,
"learning_rate": 5.984555984555985e-05,
"loss": 0.0207,
"step": 310
},
{
"epoch": 1.502415458937198,
"grad_norm": 0.0704076737165451,
"learning_rate": 6.0038610038610044e-05,
"loss": 0.0159,
"step": 311
},
{
"epoch": 1.5072463768115942,
"grad_norm": 0.12095512449741364,
"learning_rate": 6.023166023166024e-05,
"loss": 0.0208,
"step": 312
},
{
"epoch": 1.5120772946859904,
"grad_norm": 0.0958373174071312,
"learning_rate": 6.042471042471043e-05,
"loss": 0.0165,
"step": 313
},
{
"epoch": 1.5169082125603865,
"grad_norm": 0.07881686091423035,
"learning_rate": 6.061776061776062e-05,
"loss": 0.018,
"step": 314
},
{
"epoch": 1.5217391304347827,
"grad_norm": 0.10672390460968018,
"learning_rate": 6.0810810810810814e-05,
"loss": 0.0241,
"step": 315
},
{
"epoch": 1.5265700483091789,
"grad_norm": 0.09105593711137772,
"learning_rate": 6.100386100386101e-05,
"loss": 0.0173,
"step": 316
},
{
"epoch": 1.531400966183575,
"grad_norm": 0.07651592046022415,
"learning_rate": 6.11969111969112e-05,
"loss": 0.0183,
"step": 317
},
{
"epoch": 1.5362318840579712,
"grad_norm": 0.12212449312210083,
"learning_rate": 6.138996138996139e-05,
"loss": 0.0206,
"step": 318
},
{
"epoch": 1.541062801932367,
"grad_norm": 0.08930546045303345,
"learning_rate": 6.158301158301159e-05,
"loss": 0.0195,
"step": 319
},
{
"epoch": 1.5458937198067633,
"grad_norm": 0.18044796586036682,
"learning_rate": 6.177606177606178e-05,
"loss": 0.0227,
"step": 320
},
{
"epoch": 1.5507246376811594,
"grad_norm": 0.13932865858078003,
"learning_rate": 6.196911196911198e-05,
"loss": 0.0458,
"step": 321
},
{
"epoch": 1.5555555555555556,
"grad_norm": 0.13868370652198792,
"learning_rate": 6.216216216216216e-05,
"loss": 0.0163,
"step": 322
},
{
"epoch": 1.5603864734299517,
"grad_norm": 0.09221283346414566,
"learning_rate": 6.235521235521236e-05,
"loss": 0.0237,
"step": 323
},
{
"epoch": 1.5652173913043477,
"grad_norm": 0.12691855430603027,
"learning_rate": 6.254826254826255e-05,
"loss": 0.0205,
"step": 324
},
{
"epoch": 1.5700483091787438,
"grad_norm": 0.0665147602558136,
"learning_rate": 6.274131274131275e-05,
"loss": 0.0167,
"step": 325
},
{
"epoch": 1.5700483091787438,
"eval_loss": 0.01550750620663166,
"eval_runtime": 20.3076,
"eval_samples_per_second": 4.924,
"eval_steps_per_second": 0.148,
"step": 325
},
{
"epoch": 1.57487922705314,
"grad_norm": 0.08152501285076141,
"learning_rate": 6.293436293436293e-05,
"loss": 0.0188,
"step": 326
},
{
"epoch": 1.5797101449275361,
"grad_norm": 0.08206786960363388,
"learning_rate": 6.312741312741313e-05,
"loss": 0.0217,
"step": 327
},
{
"epoch": 1.5845410628019323,
"grad_norm": 0.06393270939588547,
"learning_rate": 6.332046332046332e-05,
"loss": 0.0257,
"step": 328
},
{
"epoch": 1.5893719806763285,
"grad_norm": 0.08677301555871964,
"learning_rate": 6.351351351351352e-05,
"loss": 0.0239,
"step": 329
},
{
"epoch": 1.5942028985507246,
"grad_norm": 0.06595584750175476,
"learning_rate": 6.37065637065637e-05,
"loss": 0.0141,
"step": 330
},
{
"epoch": 1.5990338164251208,
"grad_norm": 0.07364228367805481,
"learning_rate": 6.38996138996139e-05,
"loss": 0.0225,
"step": 331
},
{
"epoch": 1.603864734299517,
"grad_norm": 0.0809311717748642,
"learning_rate": 6.40926640926641e-05,
"loss": 0.0165,
"step": 332
},
{
"epoch": 1.608695652173913,
"grad_norm": 0.08457648009061813,
"learning_rate": 6.428571428571429e-05,
"loss": 0.0233,
"step": 333
},
{
"epoch": 1.6135265700483092,
"grad_norm": 0.10034564137458801,
"learning_rate": 6.447876447876449e-05,
"loss": 0.0208,
"step": 334
},
{
"epoch": 1.6183574879227054,
"grad_norm": 0.07684129476547241,
"learning_rate": 6.467181467181467e-05,
"loss": 0.0189,
"step": 335
},
{
"epoch": 1.6231884057971016,
"grad_norm": 0.12758824229240417,
"learning_rate": 6.486486486486487e-05,
"loss": 0.0361,
"step": 336
},
{
"epoch": 1.6280193236714977,
"grad_norm": 0.07505548000335693,
"learning_rate": 6.505791505791506e-05,
"loss": 0.0179,
"step": 337
},
{
"epoch": 1.6328502415458939,
"grad_norm": 0.08614417910575867,
"learning_rate": 6.525096525096526e-05,
"loss": 0.0228,
"step": 338
},
{
"epoch": 1.6376811594202898,
"grad_norm": 0.09482710063457489,
"learning_rate": 6.544401544401544e-05,
"loss": 0.0233,
"step": 339
},
{
"epoch": 1.642512077294686,
"grad_norm": 0.09455744177103043,
"learning_rate": 6.563706563706564e-05,
"loss": 0.023,
"step": 340
},
{
"epoch": 1.6473429951690821,
"grad_norm": 0.07523331791162491,
"learning_rate": 6.583011583011583e-05,
"loss": 0.0184,
"step": 341
},
{
"epoch": 1.6521739130434783,
"grad_norm": 0.08421140909194946,
"learning_rate": 6.602316602316603e-05,
"loss": 0.0248,
"step": 342
},
{
"epoch": 1.6570048309178744,
"grad_norm": 0.07453511655330658,
"learning_rate": 6.621621621621621e-05,
"loss": 0.0205,
"step": 343
},
{
"epoch": 1.6618357487922706,
"grad_norm": 0.08574186265468597,
"learning_rate": 6.640926640926641e-05,
"loss": 0.0163,
"step": 344
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.07299024611711502,
"learning_rate": 6.66023166023166e-05,
"loss": 0.0253,
"step": 345
},
{
"epoch": 1.6714975845410627,
"grad_norm": 0.07789258658885956,
"learning_rate": 6.67953667953668e-05,
"loss": 0.0102,
"step": 346
},
{
"epoch": 1.6763285024154588,
"grad_norm": 0.10461621731519699,
"learning_rate": 6.6988416988417e-05,
"loss": 0.0209,
"step": 347
},
{
"epoch": 1.681159420289855,
"grad_norm": 0.09494093060493469,
"learning_rate": 6.718146718146718e-05,
"loss": 0.0232,
"step": 348
},
{
"epoch": 1.6859903381642511,
"grad_norm": 0.06846007704734802,
"learning_rate": 6.737451737451738e-05,
"loss": 0.0124,
"step": 349
},
{
"epoch": 1.6908212560386473,
"grad_norm": 0.1044989824295044,
"learning_rate": 6.756756756756757e-05,
"loss": 0.0238,
"step": 350
},
{
"epoch": 1.6908212560386473,
"eval_loss": 0.012536454014480114,
"eval_runtime": 20.2976,
"eval_samples_per_second": 4.927,
"eval_steps_per_second": 0.148,
"step": 350
},
{
"epoch": 1.6956521739130435,
"grad_norm": 0.08567062020301819,
"learning_rate": 6.776061776061777e-05,
"loss": 0.0143,
"step": 351
},
{
"epoch": 1.7004830917874396,
"grad_norm": 0.08636616915464401,
"learning_rate": 6.795366795366795e-05,
"loss": 0.0185,
"step": 352
},
{
"epoch": 1.7053140096618358,
"grad_norm": 0.1519186943769455,
"learning_rate": 6.814671814671815e-05,
"loss": 0.0374,
"step": 353
},
{
"epoch": 1.710144927536232,
"grad_norm": 0.11869102716445923,
"learning_rate": 6.833976833976834e-05,
"loss": 0.0202,
"step": 354
},
{
"epoch": 1.714975845410628,
"grad_norm": 0.07147668302059174,
"learning_rate": 6.853281853281854e-05,
"loss": 0.0259,
"step": 355
},
{
"epoch": 1.7198067632850242,
"grad_norm": 0.08442330360412598,
"learning_rate": 6.872586872586872e-05,
"loss": 0.0155,
"step": 356
},
{
"epoch": 1.7246376811594204,
"grad_norm": 0.10003894567489624,
"learning_rate": 6.891891891891892e-05,
"loss": 0.0219,
"step": 357
},
{
"epoch": 1.7294685990338166,
"grad_norm": 0.08303581923246384,
"learning_rate": 6.911196911196911e-05,
"loss": 0.0137,
"step": 358
},
{
"epoch": 1.7342995169082127,
"grad_norm": 0.06369996815919876,
"learning_rate": 6.930501930501931e-05,
"loss": 0.0145,
"step": 359
},
{
"epoch": 1.7391304347826086,
"grad_norm": 0.09714517742395401,
"learning_rate": 6.949806949806951e-05,
"loss": 0.0179,
"step": 360
},
{
"epoch": 1.7439613526570048,
"grad_norm": 0.10487079620361328,
"learning_rate": 6.96911196911197e-05,
"loss": 0.023,
"step": 361
},
{
"epoch": 1.748792270531401,
"grad_norm": 0.1678726226091385,
"learning_rate": 6.988416988416989e-05,
"loss": 0.0181,
"step": 362
},
{
"epoch": 1.7536231884057971,
"grad_norm": 0.13542933762073517,
"learning_rate": 7.007722007722008e-05,
"loss": 0.0259,
"step": 363
},
{
"epoch": 1.7584541062801933,
"grad_norm": 0.10919655114412308,
"learning_rate": 7.027027027027028e-05,
"loss": 0.0206,
"step": 364
},
{
"epoch": 1.7632850241545892,
"grad_norm": 0.08248183876276016,
"learning_rate": 7.046332046332046e-05,
"loss": 0.0131,
"step": 365
},
{
"epoch": 1.7681159420289854,
"grad_norm": 0.10803995281457901,
"learning_rate": 7.065637065637066e-05,
"loss": 0.0179,
"step": 366
},
{
"epoch": 1.7729468599033815,
"grad_norm": 0.12460337579250336,
"learning_rate": 7.084942084942085e-05,
"loss": 0.0186,
"step": 367
},
{
"epoch": 1.7777777777777777,
"grad_norm": 0.08366953581571579,
"learning_rate": 7.104247104247105e-05,
"loss": 0.0161,
"step": 368
},
{
"epoch": 1.7826086956521738,
"grad_norm": 0.12464456260204315,
"learning_rate": 7.123552123552123e-05,
"loss": 0.0263,
"step": 369
},
{
"epoch": 1.78743961352657,
"grad_norm": 0.06674820184707642,
"learning_rate": 7.142857142857143e-05,
"loss": 0.0124,
"step": 370
},
{
"epoch": 1.7922705314009661,
"grad_norm": 0.13090987503528595,
"learning_rate": 7.162162162162162e-05,
"loss": 0.0239,
"step": 371
},
{
"epoch": 1.7971014492753623,
"grad_norm": 0.08615607768297195,
"learning_rate": 7.181467181467182e-05,
"loss": 0.0192,
"step": 372
},
{
"epoch": 1.8019323671497585,
"grad_norm": 0.07857555896043777,
"learning_rate": 7.2007722007722e-05,
"loss": 0.0181,
"step": 373
},
{
"epoch": 1.8067632850241546,
"grad_norm": 0.08106198161840439,
"learning_rate": 7.22007722007722e-05,
"loss": 0.016,
"step": 374
},
{
"epoch": 1.8115942028985508,
"grad_norm": 0.17997094988822937,
"learning_rate": 7.23938223938224e-05,
"loss": 0.0191,
"step": 375
},
{
"epoch": 1.8115942028985508,
"eval_loss": 0.013815541751682758,
"eval_runtime": 20.2875,
"eval_samples_per_second": 4.929,
"eval_steps_per_second": 0.148,
"step": 375
},
{
"epoch": 1.816425120772947,
"grad_norm": 0.0769357979297638,
"learning_rate": 7.258687258687259e-05,
"loss": 0.0268,
"step": 376
},
{
"epoch": 1.821256038647343,
"grad_norm": 0.07703779637813568,
"learning_rate": 7.277992277992279e-05,
"loss": 0.0208,
"step": 377
},
{
"epoch": 1.8260869565217392,
"grad_norm": 0.09476115554571152,
"learning_rate": 7.297297297297297e-05,
"loss": 0.0247,
"step": 378
},
{
"epoch": 1.8309178743961354,
"grad_norm": 0.06667820364236832,
"learning_rate": 7.316602316602317e-05,
"loss": 0.0166,
"step": 379
},
{
"epoch": 1.8357487922705316,
"grad_norm": 0.06834971159696579,
"learning_rate": 7.335907335907336e-05,
"loss": 0.0146,
"step": 380
},
{
"epoch": 1.8405797101449275,
"grad_norm": 0.0759090781211853,
"learning_rate": 7.355212355212356e-05,
"loss": 0.0191,
"step": 381
},
{
"epoch": 1.8454106280193237,
"grad_norm": 0.0922810360789299,
"learning_rate": 7.374517374517374e-05,
"loss": 0.0175,
"step": 382
},
{
"epoch": 1.8502415458937198,
"grad_norm": 0.08224644511938095,
"learning_rate": 7.393822393822394e-05,
"loss": 0.0184,
"step": 383
},
{
"epoch": 1.855072463768116,
"grad_norm": 0.07002474367618561,
"learning_rate": 7.413127413127413e-05,
"loss": 0.0148,
"step": 384
},
{
"epoch": 1.8599033816425121,
"grad_norm": 0.06282170861959457,
"learning_rate": 7.432432432432433e-05,
"loss": 0.0146,
"step": 385
},
{
"epoch": 1.864734299516908,
"grad_norm": 0.0824386402964592,
"learning_rate": 7.451737451737452e-05,
"loss": 0.0177,
"step": 386
},
{
"epoch": 1.8695652173913042,
"grad_norm": 0.10285218060016632,
"learning_rate": 7.471042471042471e-05,
"loss": 0.0266,
"step": 387
},
{
"epoch": 1.8743961352657004,
"grad_norm": 0.06601214408874512,
"learning_rate": 7.49034749034749e-05,
"loss": 0.0143,
"step": 388
},
{
"epoch": 1.8792270531400965,
"grad_norm": 0.08264856040477753,
"learning_rate": 7.50965250965251e-05,
"loss": 0.0211,
"step": 389
},
{
"epoch": 1.8840579710144927,
"grad_norm": 0.06638066470623016,
"learning_rate": 7.52895752895753e-05,
"loss": 0.0182,
"step": 390
},
{
"epoch": 1.8888888888888888,
"grad_norm": 0.08071152865886688,
"learning_rate": 7.548262548262549e-05,
"loss": 0.02,
"step": 391
},
{
"epoch": 1.893719806763285,
"grad_norm": 0.0696544498205185,
"learning_rate": 7.567567567567568e-05,
"loss": 0.0193,
"step": 392
},
{
"epoch": 1.8985507246376812,
"grad_norm": 0.0809270590543747,
"learning_rate": 7.586872586872587e-05,
"loss": 0.0192,
"step": 393
},
{
"epoch": 1.9033816425120773,
"grad_norm": 0.06880292296409607,
"learning_rate": 7.606177606177607e-05,
"loss": 0.0171,
"step": 394
},
{
"epoch": 1.9082125603864735,
"grad_norm": 0.10304336249828339,
"learning_rate": 7.625482625482626e-05,
"loss": 0.0154,
"step": 395
},
{
"epoch": 1.9130434782608696,
"grad_norm": 0.07314369082450867,
"learning_rate": 7.644787644787645e-05,
"loss": 0.0165,
"step": 396
},
{
"epoch": 1.9178743961352658,
"grad_norm": 0.08954203128814697,
"learning_rate": 7.664092664092664e-05,
"loss": 0.017,
"step": 397
},
{
"epoch": 1.922705314009662,
"grad_norm": 0.11527922749519348,
"learning_rate": 7.683397683397684e-05,
"loss": 0.0175,
"step": 398
},
{
"epoch": 1.927536231884058,
"grad_norm": 0.14367862045764923,
"learning_rate": 7.702702702702703e-05,
"loss": 0.0201,
"step": 399
},
{
"epoch": 1.9323671497584543,
"grad_norm": 0.09122224897146225,
"learning_rate": 7.722007722007723e-05,
"loss": 0.0273,
"step": 400
},
{
"epoch": 1.9323671497584543,
"eval_loss": 0.011990789324045181,
"eval_runtime": 20.3107,
"eval_samples_per_second": 4.924,
"eval_steps_per_second": 0.148,
"step": 400
},
{
"epoch": 1.9371980676328504,
"grad_norm": 0.07356519997119904,
"learning_rate": 7.741312741312741e-05,
"loss": 0.024,
"step": 401
},
{
"epoch": 1.9420289855072463,
"grad_norm": 0.11204323172569275,
"learning_rate": 7.760617760617761e-05,
"loss": 0.0271,
"step": 402
},
{
"epoch": 1.9468599033816425,
"grad_norm": 0.056930117309093475,
"learning_rate": 7.779922779922781e-05,
"loss": 0.0177,
"step": 403
},
{
"epoch": 1.9516908212560387,
"grad_norm": 0.07158982753753662,
"learning_rate": 7.7992277992278e-05,
"loss": 0.0191,
"step": 404
},
{
"epoch": 1.9565217391304348,
"grad_norm": 0.09313391894102097,
"learning_rate": 7.81853281853282e-05,
"loss": 0.0183,
"step": 405
},
{
"epoch": 1.961352657004831,
"grad_norm": 0.06606443971395493,
"learning_rate": 7.837837837837838e-05,
"loss": 0.0164,
"step": 406
},
{
"epoch": 1.966183574879227,
"grad_norm": 0.07535674422979355,
"learning_rate": 7.857142857142858e-05,
"loss": 0.0182,
"step": 407
},
{
"epoch": 1.971014492753623,
"grad_norm": 0.061097193509340286,
"learning_rate": 7.876447876447877e-05,
"loss": 0.015,
"step": 408
},
{
"epoch": 1.9758454106280192,
"grad_norm": 0.05617830529808998,
"learning_rate": 7.895752895752897e-05,
"loss": 0.0144,
"step": 409
},
{
"epoch": 1.9806763285024154,
"grad_norm": 0.12362464517354965,
"learning_rate": 7.915057915057915e-05,
"loss": 0.0154,
"step": 410
},
{
"epoch": 1.9855072463768115,
"grad_norm": 0.08519264310598373,
"learning_rate": 7.934362934362935e-05,
"loss": 0.0227,
"step": 411
},
{
"epoch": 1.9903381642512077,
"grad_norm": 0.0802370011806488,
"learning_rate": 7.953667953667954e-05,
"loss": 0.0185,
"step": 412
},
{
"epoch": 1.9951690821256038,
"grad_norm": 0.07258555293083191,
"learning_rate": 7.972972972972974e-05,
"loss": 0.024,
"step": 413
},
{
"epoch": 2.0,
"grad_norm": 0.14007550477981567,
"learning_rate": 7.992277992277992e-05,
"loss": 0.0156,
"step": 414
},
{
"epoch": 2.004830917874396,
"grad_norm": 0.049884360283613205,
"learning_rate": 8.011583011583012e-05,
"loss": 0.0109,
"step": 415
},
{
"epoch": 2.0096618357487923,
"grad_norm": 0.06829845905303955,
"learning_rate": 8.03088803088803e-05,
"loss": 0.013,
"step": 416
},
{
"epoch": 2.0144927536231885,
"grad_norm": 0.08212131261825562,
"learning_rate": 8.05019305019305e-05,
"loss": 0.0209,
"step": 417
},
{
"epoch": 2.0193236714975846,
"grad_norm": 0.07331026345491409,
"learning_rate": 8.06949806949807e-05,
"loss": 0.0104,
"step": 418
},
{
"epoch": 2.024154589371981,
"grad_norm": 0.06080864369869232,
"learning_rate": 8.088803088803089e-05,
"loss": 0.0142,
"step": 419
},
{
"epoch": 2.028985507246377,
"grad_norm": 0.07056247442960739,
"learning_rate": 8.108108108108109e-05,
"loss": 0.0155,
"step": 420
},
{
"epoch": 2.033816425120773,
"grad_norm": 0.0810440331697464,
"learning_rate": 8.127413127413128e-05,
"loss": 0.0148,
"step": 421
},
{
"epoch": 2.0386473429951693,
"grad_norm": 0.08997571468353271,
"learning_rate": 8.146718146718148e-05,
"loss": 0.0126,
"step": 422
},
{
"epoch": 2.0434782608695654,
"grad_norm": 0.07606981694698334,
"learning_rate": 8.166023166023166e-05,
"loss": 0.0153,
"step": 423
},
{
"epoch": 2.0483091787439616,
"grad_norm": 0.06378842890262604,
"learning_rate": 8.185328185328186e-05,
"loss": 0.0115,
"step": 424
},
{
"epoch": 2.0531400966183573,
"grad_norm": 0.06914151459932327,
"learning_rate": 8.204633204633205e-05,
"loss": 0.0194,
"step": 425
},
{
"epoch": 2.0531400966183573,
"eval_loss": 0.012465307489037514,
"eval_runtime": 20.2973,
"eval_samples_per_second": 4.927,
"eval_steps_per_second": 0.148,
"step": 425
},
{
"epoch": 2.0579710144927534,
"grad_norm": 0.07839097827672958,
"learning_rate": 8.223938223938225e-05,
"loss": 0.0158,
"step": 426
},
{
"epoch": 2.0628019323671496,
"grad_norm": 0.06898388266563416,
"learning_rate": 8.243243243243243e-05,
"loss": 0.016,
"step": 427
},
{
"epoch": 2.0676328502415457,
"grad_norm": 0.058221787214279175,
"learning_rate": 8.262548262548263e-05,
"loss": 0.0143,
"step": 428
},
{
"epoch": 2.072463768115942,
"grad_norm": 0.05164722725749016,
"learning_rate": 8.281853281853282e-05,
"loss": 0.0113,
"step": 429
},
{
"epoch": 2.077294685990338,
"grad_norm": 0.08281679451465607,
"learning_rate": 8.301158301158302e-05,
"loss": 0.0144,
"step": 430
},
{
"epoch": 2.082125603864734,
"grad_norm": 0.06709595769643784,
"learning_rate": 8.32046332046332e-05,
"loss": 0.0179,
"step": 431
},
{
"epoch": 2.0869565217391304,
"grad_norm": 0.07612422853708267,
"learning_rate": 8.33976833976834e-05,
"loss": 0.0139,
"step": 432
},
{
"epoch": 2.0917874396135265,
"grad_norm": 0.057307854294776917,
"learning_rate": 8.35907335907336e-05,
"loss": 0.0099,
"step": 433
},
{
"epoch": 2.0966183574879227,
"grad_norm": 0.06918705999851227,
"learning_rate": 8.378378378378379e-05,
"loss": 0.0145,
"step": 434
},
{
"epoch": 2.101449275362319,
"grad_norm": 0.07992324978113174,
"learning_rate": 8.397683397683399e-05,
"loss": 0.0133,
"step": 435
},
{
"epoch": 2.106280193236715,
"grad_norm": 0.05948076397180557,
"learning_rate": 8.416988416988417e-05,
"loss": 0.0139,
"step": 436
},
{
"epoch": 2.111111111111111,
"grad_norm": 0.08342638611793518,
"learning_rate": 8.436293436293437e-05,
"loss": 0.0106,
"step": 437
},
{
"epoch": 2.1159420289855073,
"grad_norm": 0.10372233390808105,
"learning_rate": 8.455598455598456e-05,
"loss": 0.0171,
"step": 438
},
{
"epoch": 2.1207729468599035,
"grad_norm": 0.10722784698009491,
"learning_rate": 8.474903474903476e-05,
"loss": 0.0153,
"step": 439
},
{
"epoch": 2.1256038647342996,
"grad_norm": 0.05321025848388672,
"learning_rate": 8.494208494208494e-05,
"loss": 0.0157,
"step": 440
},
{
"epoch": 2.130434782608696,
"grad_norm": 0.0834476426243782,
"learning_rate": 8.513513513513514e-05,
"loss": 0.0158,
"step": 441
},
{
"epoch": 2.135265700483092,
"grad_norm": 0.04624385014176369,
"learning_rate": 8.532818532818533e-05,
"loss": 0.0101,
"step": 442
},
{
"epoch": 2.140096618357488,
"grad_norm": 0.09156984090805054,
"learning_rate": 8.552123552123553e-05,
"loss": 0.0139,
"step": 443
},
{
"epoch": 2.1449275362318843,
"grad_norm": 0.11084246635437012,
"learning_rate": 8.571428571428571e-05,
"loss": 0.0172,
"step": 444
},
{
"epoch": 2.14975845410628,
"grad_norm": 0.05298859253525734,
"learning_rate": 8.590733590733591e-05,
"loss": 0.0116,
"step": 445
},
{
"epoch": 2.154589371980676,
"grad_norm": 0.07835599780082703,
"learning_rate": 8.61003861003861e-05,
"loss": 0.0191,
"step": 446
},
{
"epoch": 2.1594202898550723,
"grad_norm": 0.09636026620864868,
"learning_rate": 8.62934362934363e-05,
"loss": 0.0181,
"step": 447
},
{
"epoch": 2.1642512077294684,
"grad_norm": 0.10710686445236206,
"learning_rate": 8.64864864864865e-05,
"loss": 0.015,
"step": 448
},
{
"epoch": 2.1690821256038646,
"grad_norm": 0.06177045777440071,
"learning_rate": 8.667953667953668e-05,
"loss": 0.0165,
"step": 449
},
{
"epoch": 2.1739130434782608,
"grad_norm": 0.05858482047915459,
"learning_rate": 8.687258687258688e-05,
"loss": 0.0125,
"step": 450
},
{
"epoch": 2.1739130434782608,
"eval_loss": 0.012776307761669159,
"eval_runtime": 20.2754,
"eval_samples_per_second": 4.932,
"eval_steps_per_second": 0.148,
"step": 450
},
{
"epoch": 2.178743961352657,
"grad_norm": 0.07402500510215759,
"learning_rate": 8.706563706563707e-05,
"loss": 0.0169,
"step": 451
},
{
"epoch": 2.183574879227053,
"grad_norm": 0.08418232947587967,
"learning_rate": 8.725868725868727e-05,
"loss": 0.0167,
"step": 452
},
{
"epoch": 2.1884057971014492,
"grad_norm": 0.05793262645602226,
"learning_rate": 8.745173745173745e-05,
"loss": 0.0103,
"step": 453
},
{
"epoch": 2.1932367149758454,
"grad_norm": 0.08266332000494003,
"learning_rate": 8.764478764478765e-05,
"loss": 0.0164,
"step": 454
},
{
"epoch": 2.1980676328502415,
"grad_norm": 0.09661264717578888,
"learning_rate": 8.783783783783784e-05,
"loss": 0.0165,
"step": 455
},
{
"epoch": 2.2028985507246377,
"grad_norm": 0.06425665318965912,
"learning_rate": 8.803088803088804e-05,
"loss": 0.0148,
"step": 456
},
{
"epoch": 2.207729468599034,
"grad_norm": 0.09541848301887512,
"learning_rate": 8.822393822393822e-05,
"loss": 0.0139,
"step": 457
},
{
"epoch": 2.21256038647343,
"grad_norm": 0.0549871027469635,
"learning_rate": 8.841698841698842e-05,
"loss": 0.0145,
"step": 458
},
{
"epoch": 2.217391304347826,
"grad_norm": 0.0751703754067421,
"learning_rate": 8.861003861003861e-05,
"loss": 0.012,
"step": 459
},
{
"epoch": 2.2222222222222223,
"grad_norm": 0.061911825090646744,
"learning_rate": 8.880308880308881e-05,
"loss": 0.0165,
"step": 460
},
{
"epoch": 2.2270531400966185,
"grad_norm": 0.07648997008800507,
"learning_rate": 8.899613899613901e-05,
"loss": 0.0168,
"step": 461
},
{
"epoch": 2.2318840579710146,
"grad_norm": 0.06206296756863594,
"learning_rate": 8.918918918918919e-05,
"loss": 0.0135,
"step": 462
},
{
"epoch": 2.236714975845411,
"grad_norm": 0.06858911365270615,
"learning_rate": 8.938223938223939e-05,
"loss": 0.015,
"step": 463
},
{
"epoch": 2.241545893719807,
"grad_norm": 0.0879889577627182,
"learning_rate": 8.957528957528958e-05,
"loss": 0.0133,
"step": 464
},
{
"epoch": 2.246376811594203,
"grad_norm": 0.06427818536758423,
"learning_rate": 8.976833976833978e-05,
"loss": 0.018,
"step": 465
},
{
"epoch": 2.2512077294685993,
"grad_norm": 0.06355290859937668,
"learning_rate": 8.996138996138996e-05,
"loss": 0.0112,
"step": 466
},
{
"epoch": 2.2560386473429954,
"grad_norm": 0.06012749299407005,
"learning_rate": 9.015444015444016e-05,
"loss": 0.0154,
"step": 467
},
{
"epoch": 2.260869565217391,
"grad_norm": 0.08245503157377243,
"learning_rate": 9.034749034749035e-05,
"loss": 0.0168,
"step": 468
},
{
"epoch": 2.2657004830917873,
"grad_norm": 0.07163789123296738,
"learning_rate": 9.054054054054055e-05,
"loss": 0.0171,
"step": 469
},
{
"epoch": 2.2705314009661834,
"grad_norm": 0.11928809434175491,
"learning_rate": 9.073359073359073e-05,
"loss": 0.0202,
"step": 470
},
{
"epoch": 2.2753623188405796,
"grad_norm": 0.05260397493839264,
"learning_rate": 9.092664092664093e-05,
"loss": 0.012,
"step": 471
},
{
"epoch": 2.2801932367149758,
"grad_norm": 0.12959927320480347,
"learning_rate": 9.111969111969112e-05,
"loss": 0.0231,
"step": 472
},
{
"epoch": 2.285024154589372,
"grad_norm": 0.06374755501747131,
"learning_rate": 9.131274131274132e-05,
"loss": 0.0117,
"step": 473
},
{
"epoch": 2.289855072463768,
"grad_norm": 0.08595716953277588,
"learning_rate": 9.15057915057915e-05,
"loss": 0.0158,
"step": 474
},
{
"epoch": 2.2946859903381642,
"grad_norm": 0.05997462198138237,
"learning_rate": 9.16988416988417e-05,
"loss": 0.0132,
"step": 475
},
{
"epoch": 2.2946859903381642,
"eval_loss": 0.011733826249837875,
"eval_runtime": 20.2984,
"eval_samples_per_second": 4.927,
"eval_steps_per_second": 0.148,
"step": 475
},
{
"epoch": 2.2995169082125604,
"grad_norm": 0.0654623806476593,
"learning_rate": 9.18918918918919e-05,
"loss": 0.0141,
"step": 476
},
{
"epoch": 2.3043478260869565,
"grad_norm": 0.09593897312879562,
"learning_rate": 9.208494208494209e-05,
"loss": 0.0171,
"step": 477
},
{
"epoch": 2.3091787439613527,
"grad_norm": 0.05213898792862892,
"learning_rate": 9.227799227799229e-05,
"loss": 0.0125,
"step": 478
},
{
"epoch": 2.314009661835749,
"grad_norm": 0.06953102350234985,
"learning_rate": 9.247104247104247e-05,
"loss": 0.016,
"step": 479
},
{
"epoch": 2.318840579710145,
"grad_norm": 0.07360464334487915,
"learning_rate": 9.266409266409267e-05,
"loss": 0.0143,
"step": 480
},
{
"epoch": 2.323671497584541,
"grad_norm": 0.07077494263648987,
"learning_rate": 9.285714285714286e-05,
"loss": 0.0329,
"step": 481
},
{
"epoch": 2.3285024154589373,
"grad_norm": 0.08831976354122162,
"learning_rate": 9.305019305019306e-05,
"loss": 0.0131,
"step": 482
},
{
"epoch": 2.3333333333333335,
"grad_norm": 0.06258786469697952,
"learning_rate": 9.324324324324324e-05,
"loss": 0.0178,
"step": 483
},
{
"epoch": 2.3381642512077296,
"grad_norm": 0.0674007460474968,
"learning_rate": 9.343629343629344e-05,
"loss": 0.0206,
"step": 484
},
{
"epoch": 2.342995169082126,
"grad_norm": 0.09977665543556213,
"learning_rate": 9.362934362934363e-05,
"loss": 0.0118,
"step": 485
},
{
"epoch": 2.3478260869565215,
"grad_norm": 0.057383470237255096,
"learning_rate": 9.382239382239383e-05,
"loss": 0.0157,
"step": 486
},
{
"epoch": 2.3526570048309177,
"grad_norm": 0.07880475372076035,
"learning_rate": 9.401544401544401e-05,
"loss": 0.0108,
"step": 487
},
{
"epoch": 2.357487922705314,
"grad_norm": 0.06846434623003006,
"learning_rate": 9.420849420849421e-05,
"loss": 0.0128,
"step": 488
},
{
"epoch": 2.36231884057971,
"grad_norm": 0.07583679258823395,
"learning_rate": 9.44015444015444e-05,
"loss": 0.0166,
"step": 489
},
{
"epoch": 2.367149758454106,
"grad_norm": 0.0955696627497673,
"learning_rate": 9.45945945945946e-05,
"loss": 0.0209,
"step": 490
},
{
"epoch": 2.3719806763285023,
"grad_norm": 0.08694009482860565,
"learning_rate": 9.47876447876448e-05,
"loss": 0.0164,
"step": 491
},
{
"epoch": 2.3768115942028984,
"grad_norm": 0.04880060255527496,
"learning_rate": 9.498069498069498e-05,
"loss": 0.0119,
"step": 492
},
{
"epoch": 2.3816425120772946,
"grad_norm": 0.07593809068202972,
"learning_rate": 9.517374517374518e-05,
"loss": 0.0214,
"step": 493
},
{
"epoch": 2.3864734299516908,
"grad_norm": 0.07582546025514603,
"learning_rate": 9.536679536679537e-05,
"loss": 0.0147,
"step": 494
},
{
"epoch": 2.391304347826087,
"grad_norm": 0.06178034842014313,
"learning_rate": 9.555984555984557e-05,
"loss": 0.0165,
"step": 495
},
{
"epoch": 2.396135265700483,
"grad_norm": 0.05816735699772835,
"learning_rate": 9.575289575289575e-05,
"loss": 0.011,
"step": 496
},
{
"epoch": 2.4009661835748792,
"grad_norm": 0.0542505607008934,
"learning_rate": 9.594594594594595e-05,
"loss": 0.0164,
"step": 497
},
{
"epoch": 2.4057971014492754,
"grad_norm": 0.07111211121082306,
"learning_rate": 9.613899613899614e-05,
"loss": 0.016,
"step": 498
},
{
"epoch": 2.4106280193236715,
"grad_norm": 0.04318169131875038,
"learning_rate": 9.633204633204634e-05,
"loss": 0.0152,
"step": 499
},
{
"epoch": 2.4154589371980677,
"grad_norm": 0.05651891976594925,
"learning_rate": 9.652509652509652e-05,
"loss": 0.0142,
"step": 500
},
{
"epoch": 2.4154589371980677,
"eval_loss": 0.009894417598843575,
"eval_runtime": 20.2924,
"eval_samples_per_second": 4.928,
"eval_steps_per_second": 0.148,
"step": 500
},
{
"epoch": 2.420289855072464,
"grad_norm": 0.04565607011318207,
"learning_rate": 9.671814671814672e-05,
"loss": 0.0101,
"step": 501
},
{
"epoch": 2.42512077294686,
"grad_norm": 0.05563889071345329,
"learning_rate": 9.691119691119691e-05,
"loss": 0.0181,
"step": 502
},
{
"epoch": 2.429951690821256,
"grad_norm": 0.07889113575220108,
"learning_rate": 9.710424710424711e-05,
"loss": 0.0126,
"step": 503
},
{
"epoch": 2.4347826086956523,
"grad_norm": 0.05971178784966469,
"learning_rate": 9.729729729729731e-05,
"loss": 0.0104,
"step": 504
},
{
"epoch": 2.4396135265700485,
"grad_norm": 0.06959031522274017,
"learning_rate": 9.74903474903475e-05,
"loss": 0.0169,
"step": 505
},
{
"epoch": 2.4444444444444446,
"grad_norm": 0.059511762112379074,
"learning_rate": 9.76833976833977e-05,
"loss": 0.0095,
"step": 506
},
{
"epoch": 2.449275362318841,
"grad_norm": 0.07739004492759705,
"learning_rate": 9.787644787644788e-05,
"loss": 0.0142,
"step": 507
},
{
"epoch": 2.454106280193237,
"grad_norm": 0.06242101639509201,
"learning_rate": 9.806949806949808e-05,
"loss": 0.013,
"step": 508
},
{
"epoch": 2.4589371980676327,
"grad_norm": 0.07871799916028976,
"learning_rate": 9.826254826254826e-05,
"loss": 0.0198,
"step": 509
},
{
"epoch": 2.463768115942029,
"grad_norm": 0.06508443504571915,
"learning_rate": 9.845559845559846e-05,
"loss": 0.0125,
"step": 510
},
{
"epoch": 2.468599033816425,
"grad_norm": 0.049432434141635895,
"learning_rate": 9.864864864864865e-05,
"loss": 0.0105,
"step": 511
},
{
"epoch": 2.473429951690821,
"grad_norm": 0.08035603910684586,
"learning_rate": 9.884169884169885e-05,
"loss": 0.0128,
"step": 512
},
{
"epoch": 2.4782608695652173,
"grad_norm": 0.05681634321808815,
"learning_rate": 9.903474903474904e-05,
"loss": 0.0147,
"step": 513
},
{
"epoch": 2.4830917874396135,
"grad_norm": 0.09850388020277023,
"learning_rate": 9.922779922779923e-05,
"loss": 0.0159,
"step": 514
},
{
"epoch": 2.4879227053140096,
"grad_norm": 0.06364881247282028,
"learning_rate": 9.942084942084942e-05,
"loss": 0.0128,
"step": 515
},
{
"epoch": 2.4927536231884058,
"grad_norm": 0.08354095369577408,
"learning_rate": 9.961389961389962e-05,
"loss": 0.0123,
"step": 516
},
{
"epoch": 2.497584541062802,
"grad_norm": 0.047128040343523026,
"learning_rate": 9.98069498069498e-05,
"loss": 0.0104,
"step": 517
},
{
"epoch": 2.502415458937198,
"grad_norm": 0.07560840994119644,
"learning_rate": 0.0001,
"loss": 0.0115,
"step": 518
},
{
"epoch": 2.5072463768115942,
"grad_norm": 0.08767189085483551,
"learning_rate": 9.9999988623013e-05,
"loss": 0.02,
"step": 519
},
{
"epoch": 2.5120772946859904,
"grad_norm": 0.06940112262964249,
"learning_rate": 9.999995449205719e-05,
"loss": 0.0128,
"step": 520
},
{
"epoch": 2.5169082125603865,
"grad_norm": 0.08003146946430206,
"learning_rate": 9.999989760714809e-05,
"loss": 0.0156,
"step": 521
},
{
"epoch": 2.5217391304347827,
"grad_norm": 0.06202618032693863,
"learning_rate": 9.999981796831159e-05,
"loss": 0.0152,
"step": 522
},
{
"epoch": 2.526570048309179,
"grad_norm": 0.05850287154316902,
"learning_rate": 9.999971557558395e-05,
"loss": 0.0134,
"step": 523
},
{
"epoch": 2.531400966183575,
"grad_norm": 0.05508386343717575,
"learning_rate": 9.999959042901174e-05,
"loss": 0.0162,
"step": 524
},
{
"epoch": 2.536231884057971,
"grad_norm": 0.05612890422344208,
"learning_rate": 9.999944252865192e-05,
"loss": 0.0119,
"step": 525
},
{
"epoch": 2.536231884057971,
"eval_loss": 0.010469529777765274,
"eval_runtime": 20.2698,
"eval_samples_per_second": 4.933,
"eval_steps_per_second": 0.148,
"step": 525
},
{
"epoch": 2.541062801932367,
"grad_norm": 0.06656938046216965,
"learning_rate": 9.999927187457181e-05,
"loss": 0.0124,
"step": 526
},
{
"epoch": 2.545893719806763,
"grad_norm": 0.05605817586183548,
"learning_rate": 9.999907846684906e-05,
"loss": 0.0152,
"step": 527
},
{
"epoch": 2.550724637681159,
"grad_norm": 0.055510830134153366,
"learning_rate": 9.999886230557167e-05,
"loss": 0.0182,
"step": 528
},
{
"epoch": 2.5555555555555554,
"grad_norm": 0.047456130385398865,
"learning_rate": 9.999862339083804e-05,
"loss": 0.012,
"step": 529
},
{
"epoch": 2.5603864734299515,
"grad_norm": 0.08240535855293274,
"learning_rate": 9.999836172275688e-05,
"loss": 0.015,
"step": 530
},
{
"epoch": 2.5652173913043477,
"grad_norm": 0.06938566267490387,
"learning_rate": 9.999807730144728e-05,
"loss": 0.0131,
"step": 531
},
{
"epoch": 2.570048309178744,
"grad_norm": 0.054002463817596436,
"learning_rate": 9.999777012703866e-05,
"loss": 0.0117,
"step": 532
},
{
"epoch": 2.57487922705314,
"grad_norm": 0.06356227397918701,
"learning_rate": 9.999744019967081e-05,
"loss": 0.01,
"step": 533
},
{
"epoch": 2.579710144927536,
"grad_norm": 0.0544712208211422,
"learning_rate": 9.999708751949389e-05,
"loss": 0.0103,
"step": 534
},
{
"epoch": 2.5845410628019323,
"grad_norm": 0.06751914322376251,
"learning_rate": 9.999671208666838e-05,
"loss": 0.0142,
"step": 535
},
{
"epoch": 2.5893719806763285,
"grad_norm": 0.04503876715898514,
"learning_rate": 9.999631390136513e-05,
"loss": 0.0115,
"step": 536
},
{
"epoch": 2.5942028985507246,
"grad_norm": 0.07345236837863922,
"learning_rate": 9.999589296376537e-05,
"loss": 0.0115,
"step": 537
},
{
"epoch": 2.5990338164251208,
"grad_norm": 0.07252614200115204,
"learning_rate": 9.999544927406063e-05,
"loss": 0.0146,
"step": 538
},
{
"epoch": 2.603864734299517,
"grad_norm": 0.07327065616846085,
"learning_rate": 9.999498283245284e-05,
"loss": 0.0125,
"step": 539
},
{
"epoch": 2.608695652173913,
"grad_norm": 0.04617929458618164,
"learning_rate": 9.999449363915427e-05,
"loss": 0.0103,
"step": 540
},
{
"epoch": 2.6135265700483092,
"grad_norm": 0.05602256953716278,
"learning_rate": 9.999398169438754e-05,
"loss": 0.0124,
"step": 541
},
{
"epoch": 2.6183574879227054,
"grad_norm": 0.07086073607206345,
"learning_rate": 9.999344699838562e-05,
"loss": 0.0138,
"step": 542
},
{
"epoch": 2.6231884057971016,
"grad_norm": 0.07867629081010818,
"learning_rate": 9.999288955139183e-05,
"loss": 0.0154,
"step": 543
},
{
"epoch": 2.6280193236714977,
"grad_norm": 0.06904333829879761,
"learning_rate": 9.999230935365989e-05,
"loss": 0.0161,
"step": 544
},
{
"epoch": 2.632850241545894,
"grad_norm": 0.04477986320853233,
"learning_rate": 9.999170640545378e-05,
"loss": 0.0141,
"step": 545
},
{
"epoch": 2.63768115942029,
"grad_norm": 0.07836603373289108,
"learning_rate": 9.999108070704795e-05,
"loss": 0.0111,
"step": 546
},
{
"epoch": 2.642512077294686,
"grad_norm": 0.07152969390153885,
"learning_rate": 9.99904322587271e-05,
"loss": 0.0124,
"step": 547
},
{
"epoch": 2.6473429951690823,
"grad_norm": 0.08956319838762283,
"learning_rate": 9.998976106078634e-05,
"loss": 0.0189,
"step": 548
},
{
"epoch": 2.6521739130434785,
"grad_norm": 0.05870571359992027,
"learning_rate": 9.99890671135311e-05,
"loss": 0.0177,
"step": 549
},
{
"epoch": 2.6570048309178746,
"grad_norm": 0.05460686236619949,
"learning_rate": 9.998835041727723e-05,
"loss": 0.0131,
"step": 550
},
{
"epoch": 2.6570048309178746,
"eval_loss": 0.011807677336037159,
"eval_runtime": 20.2717,
"eval_samples_per_second": 4.933,
"eval_steps_per_second": 0.148,
"step": 550
},
{
"epoch": 2.661835748792271,
"grad_norm": 0.08161139488220215,
"learning_rate": 9.998761097235083e-05,
"loss": 0.0124,
"step": 551
},
{
"epoch": 2.6666666666666665,
"grad_norm": 0.05696139857172966,
"learning_rate": 9.998684877908844e-05,
"loss": 0.0149,
"step": 552
},
{
"epoch": 2.6714975845410627,
"grad_norm": 0.10040969401597977,
"learning_rate": 9.998606383783691e-05,
"loss": 0.017,
"step": 553
},
{
"epoch": 2.676328502415459,
"grad_norm": 0.05457184091210365,
"learning_rate": 9.998525614895343e-05,
"loss": 0.0142,
"step": 554
},
{
"epoch": 2.681159420289855,
"grad_norm": 0.08906134963035583,
"learning_rate": 9.99844257128056e-05,
"loss": 0.0198,
"step": 555
},
{
"epoch": 2.685990338164251,
"grad_norm": 0.06964429467916489,
"learning_rate": 9.99835725297713e-05,
"loss": 0.0118,
"step": 556
},
{
"epoch": 2.6908212560386473,
"grad_norm": 0.03187815472483635,
"learning_rate": 9.998269660023882e-05,
"loss": 0.0067,
"step": 557
},
{
"epoch": 2.6956521739130435,
"grad_norm": 0.0665697306394577,
"learning_rate": 9.998179792460676e-05,
"loss": 0.012,
"step": 558
},
{
"epoch": 2.7004830917874396,
"grad_norm": 0.05260172113776207,
"learning_rate": 9.99808765032841e-05,
"loss": 0.0126,
"step": 559
},
{
"epoch": 2.7053140096618358,
"grad_norm": 0.048168253153562546,
"learning_rate": 9.997993233669014e-05,
"loss": 0.013,
"step": 560
},
{
"epoch": 2.710144927536232,
"grad_norm": 0.07857447117567062,
"learning_rate": 9.997896542525459e-05,
"loss": 0.0097,
"step": 561
},
{
"epoch": 2.714975845410628,
"grad_norm": 0.042889662086963654,
"learning_rate": 9.997797576941744e-05,
"loss": 0.0122,
"step": 562
},
{
"epoch": 2.7198067632850242,
"grad_norm": 0.056598689407110214,
"learning_rate": 9.997696336962907e-05,
"loss": 0.0115,
"step": 563
},
{
"epoch": 2.7246376811594204,
"grad_norm": 0.06313615292310715,
"learning_rate": 9.997592822635021e-05,
"loss": 0.0152,
"step": 564
},
{
"epoch": 2.7294685990338166,
"grad_norm": 0.05057726800441742,
"learning_rate": 9.997487034005193e-05,
"loss": 0.0157,
"step": 565
},
{
"epoch": 2.7342995169082127,
"grad_norm": 0.044472839683294296,
"learning_rate": 9.997378971121564e-05,
"loss": 0.0098,
"step": 566
},
{
"epoch": 2.7391304347826084,
"grad_norm": 0.07471047341823578,
"learning_rate": 9.997268634033312e-05,
"loss": 0.0172,
"step": 567
},
{
"epoch": 2.7439613526570046,
"grad_norm": 0.053372822701931,
"learning_rate": 9.99715602279065e-05,
"loss": 0.014,
"step": 568
},
{
"epoch": 2.7487922705314007,
"grad_norm": 0.0864279493689537,
"learning_rate": 9.997041137444823e-05,
"loss": 0.0163,
"step": 569
},
{
"epoch": 2.753623188405797,
"grad_norm": 0.0629257932305336,
"learning_rate": 9.996923978048115e-05,
"loss": 0.0129,
"step": 570
},
{
"epoch": 2.758454106280193,
"grad_norm": 0.05703673139214516,
"learning_rate": 9.996804544653842e-05,
"loss": 0.0098,
"step": 571
},
{
"epoch": 2.763285024154589,
"grad_norm": 0.04491772875189781,
"learning_rate": 9.996682837316356e-05,
"loss": 0.0101,
"step": 572
},
{
"epoch": 2.7681159420289854,
"grad_norm": 0.06901387870311737,
"learning_rate": 9.996558856091043e-05,
"loss": 0.0198,
"step": 573
},
{
"epoch": 2.7729468599033815,
"grad_norm": 0.07164058834314346,
"learning_rate": 9.996432601034324e-05,
"loss": 0.0125,
"step": 574
},
{
"epoch": 2.7777777777777777,
"grad_norm": 0.059621043503284454,
"learning_rate": 9.996304072203657e-05,
"loss": 0.0089,
"step": 575
},
{
"epoch": 2.7777777777777777,
"eval_loss": 0.00997437909245491,
"eval_runtime": 20.2741,
"eval_samples_per_second": 4.932,
"eval_steps_per_second": 0.148,
"step": 575
},
{
"epoch": 2.782608695652174,
"grad_norm": 0.07000112533569336,
"learning_rate": 9.99617326965753e-05,
"loss": 0.0105,
"step": 576
},
{
"epoch": 2.78743961352657,
"grad_norm": 0.08453238010406494,
"learning_rate": 9.996040193455472e-05,
"loss": 0.015,
"step": 577
},
{
"epoch": 2.792270531400966,
"grad_norm": 0.07728136330842972,
"learning_rate": 9.99590484365804e-05,
"loss": 0.017,
"step": 578
},
{
"epoch": 2.7971014492753623,
"grad_norm": 0.11687036603689194,
"learning_rate": 9.995767220326829e-05,
"loss": 0.0235,
"step": 579
},
{
"epoch": 2.8019323671497585,
"grad_norm": 0.07454598695039749,
"learning_rate": 9.99562732352447e-05,
"loss": 0.0115,
"step": 580
},
{
"epoch": 2.8067632850241546,
"grad_norm": 0.049887824803590775,
"learning_rate": 9.995485153314628e-05,
"loss": 0.0111,
"step": 581
},
{
"epoch": 2.8115942028985508,
"grad_norm": 0.05184588208794594,
"learning_rate": 9.995340709762002e-05,
"loss": 0.0149,
"step": 582
},
{
"epoch": 2.816425120772947,
"grad_norm": 0.05257965624332428,
"learning_rate": 9.995193992932321e-05,
"loss": 0.0132,
"step": 583
},
{
"epoch": 2.821256038647343,
"grad_norm": 0.05391634255647659,
"learning_rate": 9.995045002892358e-05,
"loss": 0.0159,
"step": 584
},
{
"epoch": 2.8260869565217392,
"grad_norm": 0.052167993038892746,
"learning_rate": 9.994893739709912e-05,
"loss": 0.0128,
"step": 585
},
{
"epoch": 2.8309178743961354,
"grad_norm": 0.03560145944356918,
"learning_rate": 9.994740203453821e-05,
"loss": 0.0094,
"step": 586
},
{
"epoch": 2.8357487922705316,
"grad_norm": 0.05537371709942818,
"learning_rate": 9.994584394193957e-05,
"loss": 0.0116,
"step": 587
},
{
"epoch": 2.8405797101449277,
"grad_norm": 0.05569041520357132,
"learning_rate": 9.994426312001223e-05,
"loss": 0.0121,
"step": 588
},
{
"epoch": 2.845410628019324,
"grad_norm": 0.04385409504175186,
"learning_rate": 9.994265956947563e-05,
"loss": 0.0078,
"step": 589
},
{
"epoch": 2.85024154589372,
"grad_norm": 0.056397683918476105,
"learning_rate": 9.994103329105947e-05,
"loss": 0.0147,
"step": 590
},
{
"epoch": 2.855072463768116,
"grad_norm": 0.06379224359989166,
"learning_rate": 9.993938428550387e-05,
"loss": 0.0147,
"step": 591
},
{
"epoch": 2.8599033816425123,
"grad_norm": 0.06637546420097351,
"learning_rate": 9.993771255355921e-05,
"loss": 0.0144,
"step": 592
},
{
"epoch": 2.864734299516908,
"grad_norm": 0.05549359321594238,
"learning_rate": 9.993601809598634e-05,
"loss": 0.0149,
"step": 593
},
{
"epoch": 2.869565217391304,
"grad_norm": 0.06366102397441864,
"learning_rate": 9.99343009135563e-05,
"loss": 0.0119,
"step": 594
},
{
"epoch": 2.8743961352657004,
"grad_norm": 0.047170039266347885,
"learning_rate": 9.993256100705058e-05,
"loss": 0.0126,
"step": 595
},
{
"epoch": 2.8792270531400965,
"grad_norm": 0.07046766579151154,
"learning_rate": 9.993079837726096e-05,
"loss": 0.0121,
"step": 596
},
{
"epoch": 2.8840579710144927,
"grad_norm": 0.06048014014959335,
"learning_rate": 9.992901302498959e-05,
"loss": 0.0125,
"step": 597
},
{
"epoch": 2.888888888888889,
"grad_norm": 0.09012962877750397,
"learning_rate": 9.992720495104895e-05,
"loss": 0.0142,
"step": 598
},
{
"epoch": 2.893719806763285,
"grad_norm": 0.05745493993163109,
"learning_rate": 9.992537415626183e-05,
"loss": 0.0144,
"step": 599
},
{
"epoch": 2.898550724637681,
"grad_norm": 0.08338668942451477,
"learning_rate": 9.992352064146142e-05,
"loss": 0.0158,
"step": 600
},
{
"epoch": 2.898550724637681,
"eval_loss": 0.009580705314874649,
"eval_runtime": 20.3022,
"eval_samples_per_second": 4.926,
"eval_steps_per_second": 0.148,
"step": 600
},
{
"epoch": 2.9033816425120773,
"grad_norm": 0.04696241393685341,
"learning_rate": 9.992164440749119e-05,
"loss": 0.0104,
"step": 601
},
{
"epoch": 2.9082125603864735,
"grad_norm": 0.0753055065870285,
"learning_rate": 9.9919745455205e-05,
"loss": 0.0133,
"step": 602
},
{
"epoch": 2.9130434782608696,
"grad_norm": 0.054660920053720474,
"learning_rate": 9.991782378546702e-05,
"loss": 0.016,
"step": 603
},
{
"epoch": 2.917874396135266,
"grad_norm": 0.05713624507188797,
"learning_rate": 9.991587939915173e-05,
"loss": 0.0128,
"step": 604
},
{
"epoch": 2.922705314009662,
"grad_norm": 0.06272771209478378,
"learning_rate": 9.991391229714401e-05,
"loss": 0.016,
"step": 605
},
{
"epoch": 2.927536231884058,
"grad_norm": 0.059268005192279816,
"learning_rate": 9.991192248033908e-05,
"loss": 0.0201,
"step": 606
},
{
"epoch": 2.9323671497584543,
"grad_norm": 0.05580710619688034,
"learning_rate": 9.990990994964239e-05,
"loss": 0.0099,
"step": 607
},
{
"epoch": 2.9371980676328504,
"grad_norm": 0.05673280358314514,
"learning_rate": 9.990787470596985e-05,
"loss": 0.0138,
"step": 608
},
{
"epoch": 2.942028985507246,
"grad_norm": 0.05470005422830582,
"learning_rate": 9.990581675024763e-05,
"loss": 0.0109,
"step": 609
},
{
"epoch": 2.9468599033816423,
"grad_norm": 0.04339778795838356,
"learning_rate": 9.99037360834123e-05,
"loss": 0.0114,
"step": 610
},
{
"epoch": 2.9516908212560384,
"grad_norm": 0.04861651733517647,
"learning_rate": 9.99016327064107e-05,
"loss": 0.0105,
"step": 611
},
{
"epoch": 2.9565217391304346,
"grad_norm": 0.05664124712347984,
"learning_rate": 9.989950662020007e-05,
"loss": 0.014,
"step": 612
},
{
"epoch": 2.9613526570048307,
"grad_norm": 0.06411837786436081,
"learning_rate": 9.98973578257479e-05,
"loss": 0.0148,
"step": 613
},
{
"epoch": 2.966183574879227,
"grad_norm": 0.06767908483743668,
"learning_rate": 9.989518632403208e-05,
"loss": 0.0121,
"step": 614
},
{
"epoch": 2.971014492753623,
"grad_norm": 0.07915518432855606,
"learning_rate": 9.989299211604082e-05,
"loss": 0.0131,
"step": 615
},
{
"epoch": 2.975845410628019,
"grad_norm": 0.06088368594646454,
"learning_rate": 9.989077520277264e-05,
"loss": 0.0098,
"step": 616
},
{
"epoch": 2.9806763285024154,
"grad_norm": 0.0866197720170021,
"learning_rate": 9.988853558523646e-05,
"loss": 0.0148,
"step": 617
},
{
"epoch": 2.9855072463768115,
"grad_norm": 0.05279546231031418,
"learning_rate": 9.988627326445143e-05,
"loss": 0.0095,
"step": 618
},
{
"epoch": 2.9903381642512077,
"grad_norm": 0.06465419381856918,
"learning_rate": 9.988398824144714e-05,
"loss": 0.0214,
"step": 619
},
{
"epoch": 2.995169082125604,
"grad_norm": 0.06091202422976494,
"learning_rate": 9.98816805172634e-05,
"loss": 0.0131,
"step": 620
},
{
"epoch": 3.0,
"grad_norm": 0.0471409372985363,
"learning_rate": 9.987935009295044e-05,
"loss": 0.0073,
"step": 621
},
{
"epoch": 3.004830917874396,
"grad_norm": 0.05190076678991318,
"learning_rate": 9.987699696956878e-05,
"loss": 0.0091,
"step": 622
},
{
"epoch": 3.0096618357487923,
"grad_norm": 0.03487321734428406,
"learning_rate": 9.987462114818928e-05,
"loss": 0.0091,
"step": 623
},
{
"epoch": 3.0144927536231885,
"grad_norm": 0.05589701607823372,
"learning_rate": 9.987222262989315e-05,
"loss": 0.0095,
"step": 624
},
{
"epoch": 3.0193236714975846,
"grad_norm": 0.06497800350189209,
"learning_rate": 9.986980141577187e-05,
"loss": 0.0119,
"step": 625
},
{
"epoch": 3.0193236714975846,
"eval_loss": 0.009575610049068928,
"eval_runtime": 20.3012,
"eval_samples_per_second": 4.926,
"eval_steps_per_second": 0.148,
"step": 625
},
{
"epoch": 3.024154589371981,
"grad_norm": 0.041647836565971375,
"learning_rate": 9.98673575069273e-05,
"loss": 0.0079,
"step": 626
},
{
"epoch": 3.028985507246377,
"grad_norm": 0.05941716581583023,
"learning_rate": 9.98648909044716e-05,
"loss": 0.0091,
"step": 627
},
{
"epoch": 3.033816425120773,
"grad_norm": 0.0788661390542984,
"learning_rate": 9.986240160952732e-05,
"loss": 0.0105,
"step": 628
},
{
"epoch": 3.0386473429951693,
"grad_norm": 0.04788121581077576,
"learning_rate": 9.985988962322721e-05,
"loss": 0.0075,
"step": 629
},
{
"epoch": 3.0434782608695654,
"grad_norm": 0.07558400183916092,
"learning_rate": 9.985735494671448e-05,
"loss": 0.0119,
"step": 630
},
{
"epoch": 3.0483091787439616,
"grad_norm": 0.06005953997373581,
"learning_rate": 9.985479758114259e-05,
"loss": 0.0122,
"step": 631
},
{
"epoch": 3.0531400966183573,
"grad_norm": 0.0517011322081089,
"learning_rate": 9.985221752767535e-05,
"loss": 0.0089,
"step": 632
},
{
"epoch": 3.0579710144927534,
"grad_norm": 0.045838914811611176,
"learning_rate": 9.984961478748688e-05,
"loss": 0.0066,
"step": 633
},
{
"epoch": 3.0628019323671496,
"grad_norm": 0.05188421905040741,
"learning_rate": 9.984698936176164e-05,
"loss": 0.0151,
"step": 634
},
{
"epoch": 3.0676328502415457,
"grad_norm": 0.04941697418689728,
"learning_rate": 9.984434125169441e-05,
"loss": 0.0132,
"step": 635
},
{
"epoch": 3.072463768115942,
"grad_norm": 0.07087542861700058,
"learning_rate": 9.98416704584903e-05,
"loss": 0.0143,
"step": 636
},
{
"epoch": 3.077294685990338,
"grad_norm": 0.03977886214852333,
"learning_rate": 9.983897698336471e-05,
"loss": 0.0117,
"step": 637
},
{
"epoch": 3.082125603864734,
"grad_norm": 0.04413791000843048,
"learning_rate": 9.98362608275434e-05,
"loss": 0.0084,
"step": 638
},
{
"epoch": 3.0869565217391304,
"grad_norm": 0.053000308573246,
"learning_rate": 9.983352199226243e-05,
"loss": 0.0111,
"step": 639
},
{
"epoch": 3.0917874396135265,
"grad_norm": 0.050405777990818024,
"learning_rate": 9.98307604787682e-05,
"loss": 0.0076,
"step": 640
},
{
"epoch": 3.0966183574879227,
"grad_norm": 0.041119880974292755,
"learning_rate": 9.982797628831739e-05,
"loss": 0.0102,
"step": 641
},
{
"epoch": 3.101449275362319,
"grad_norm": 0.06324031949043274,
"learning_rate": 9.982516942217705e-05,
"loss": 0.0105,
"step": 642
},
{
"epoch": 3.106280193236715,
"grad_norm": 0.06366854906082153,
"learning_rate": 9.982233988162455e-05,
"loss": 0.0106,
"step": 643
},
{
"epoch": 3.111111111111111,
"grad_norm": 0.06035742536187172,
"learning_rate": 9.981948766794752e-05,
"loss": 0.0109,
"step": 644
},
{
"epoch": 3.1159420289855073,
"grad_norm": 0.04412363842129707,
"learning_rate": 9.981661278244394e-05,
"loss": 0.0106,
"step": 645
},
{
"epoch": 3.1207729468599035,
"grad_norm": 0.07694416493177414,
"learning_rate": 9.981371522642212e-05,
"loss": 0.0102,
"step": 646
},
{
"epoch": 3.1256038647342996,
"grad_norm": 0.04476933553814888,
"learning_rate": 9.98107950012007e-05,
"loss": 0.009,
"step": 647
},
{
"epoch": 3.130434782608696,
"grad_norm": 0.06581299751996994,
"learning_rate": 9.980785210810859e-05,
"loss": 0.0094,
"step": 648
},
{
"epoch": 3.135265700483092,
"grad_norm": 0.05227496474981308,
"learning_rate": 9.980488654848505e-05,
"loss": 0.0127,
"step": 649
},
{
"epoch": 3.140096618357488,
"grad_norm": 0.043910861015319824,
"learning_rate": 9.980189832367966e-05,
"loss": 0.0097,
"step": 650
},
{
"epoch": 3.140096618357488,
"eval_loss": 0.009869378991425037,
"eval_runtime": 20.298,
"eval_samples_per_second": 4.927,
"eval_steps_per_second": 0.148,
"step": 650
},
{
"epoch": 3.1449275362318843,
"grad_norm": 0.05177716165781021,
"learning_rate": 9.979888743505225e-05,
"loss": 0.0095,
"step": 651
},
{
"epoch": 3.14975845410628,
"grad_norm": 0.07282808423042297,
"learning_rate": 9.979585388397308e-05,
"loss": 0.0145,
"step": 652
},
{
"epoch": 3.154589371980676,
"grad_norm": 0.05156973749399185,
"learning_rate": 9.979279767182262e-05,
"loss": 0.0098,
"step": 653
},
{
"epoch": 3.1594202898550723,
"grad_norm": 0.055524859577417374,
"learning_rate": 9.978971879999169e-05,
"loss": 0.0109,
"step": 654
},
{
"epoch": 3.1642512077294684,
"grad_norm": 0.06012894958257675,
"learning_rate": 9.97866172698814e-05,
"loss": 0.0087,
"step": 655
},
{
"epoch": 3.1690821256038646,
"grad_norm": 0.05175028741359711,
"learning_rate": 9.978349308290325e-05,
"loss": 0.0072,
"step": 656
},
{
"epoch": 3.1739130434782608,
"grad_norm": 0.04955373331904411,
"learning_rate": 9.978034624047895e-05,
"loss": 0.0092,
"step": 657
},
{
"epoch": 3.178743961352657,
"grad_norm": 0.05167277157306671,
"learning_rate": 9.977717674404056e-05,
"loss": 0.0088,
"step": 658
},
{
"epoch": 3.183574879227053,
"grad_norm": 0.04037657380104065,
"learning_rate": 9.977398459503049e-05,
"loss": 0.009,
"step": 659
},
{
"epoch": 3.1884057971014492,
"grad_norm": 0.04062899202108383,
"learning_rate": 9.977076979490138e-05,
"loss": 0.007,
"step": 660
},
{
"epoch": 3.1932367149758454,
"grad_norm": 0.06958416849374771,
"learning_rate": 9.976753234511627e-05,
"loss": 0.0119,
"step": 661
},
{
"epoch": 3.1980676328502415,
"grad_norm": 0.05316910520195961,
"learning_rate": 9.97642722471484e-05,
"loss": 0.0108,
"step": 662
},
{
"epoch": 3.2028985507246377,
"grad_norm": 0.050726763904094696,
"learning_rate": 9.976098950248141e-05,
"loss": 0.0088,
"step": 663
},
{
"epoch": 3.207729468599034,
"grad_norm": 0.047353360801935196,
"learning_rate": 9.975768411260917e-05,
"loss": 0.0078,
"step": 664
},
{
"epoch": 3.21256038647343,
"grad_norm": 0.0337323397397995,
"learning_rate": 9.975435607903596e-05,
"loss": 0.0085,
"step": 665
},
{
"epoch": 3.217391304347826,
"grad_norm": 0.051542479544878006,
"learning_rate": 9.975100540327624e-05,
"loss": 0.0085,
"step": 666
},
{
"epoch": 3.2222222222222223,
"grad_norm": 0.07729382812976837,
"learning_rate": 9.974763208685487e-05,
"loss": 0.0126,
"step": 667
},
{
"epoch": 3.2270531400966185,
"grad_norm": 0.06189757585525513,
"learning_rate": 9.974423613130697e-05,
"loss": 0.009,
"step": 668
},
{
"epoch": 3.2318840579710146,
"grad_norm": 0.04578680172562599,
"learning_rate": 9.974081753817795e-05,
"loss": 0.0075,
"step": 669
},
{
"epoch": 3.236714975845411,
"grad_norm": 0.04176221042871475,
"learning_rate": 9.973737630902356e-05,
"loss": 0.0083,
"step": 670
},
{
"epoch": 3.241545893719807,
"grad_norm": 0.08334269374608994,
"learning_rate": 9.973391244540983e-05,
"loss": 0.0106,
"step": 671
},
{
"epoch": 3.246376811594203,
"grad_norm": 0.06806229054927826,
"learning_rate": 9.973042594891309e-05,
"loss": 0.0134,
"step": 672
},
{
"epoch": 3.2512077294685993,
"grad_norm": 0.04712403565645218,
"learning_rate": 9.972691682111997e-05,
"loss": 0.01,
"step": 673
},
{
"epoch": 3.2560386473429954,
"grad_norm": 0.04654091224074364,
"learning_rate": 9.972338506362742e-05,
"loss": 0.01,
"step": 674
},
{
"epoch": 3.260869565217391,
"grad_norm": 0.05907722935080528,
"learning_rate": 9.971983067804265e-05,
"loss": 0.0089,
"step": 675
},
{
"epoch": 3.260869565217391,
"eval_loss": 0.009203135967254639,
"eval_runtime": 20.2973,
"eval_samples_per_second": 4.927,
"eval_steps_per_second": 0.148,
"step": 675
},
{
"epoch": 3.2657004830917873,
"grad_norm": 0.06332624703645706,
"learning_rate": 9.971625366598319e-05,
"loss": 0.0099,
"step": 676
},
{
"epoch": 3.2705314009661834,
"grad_norm": 0.06277251988649368,
"learning_rate": 9.971265402907688e-05,
"loss": 0.0085,
"step": 677
},
{
"epoch": 3.2753623188405796,
"grad_norm": 0.05057535693049431,
"learning_rate": 9.970903176896183e-05,
"loss": 0.011,
"step": 678
},
{
"epoch": 3.2801932367149758,
"grad_norm": 0.04588181897997856,
"learning_rate": 9.970538688728644e-05,
"loss": 0.0086,
"step": 679
},
{
"epoch": 3.285024154589372,
"grad_norm": 0.05869683250784874,
"learning_rate": 9.970171938570946e-05,
"loss": 0.0081,
"step": 680
},
{
"epoch": 3.289855072463768,
"grad_norm": 0.050425752997398376,
"learning_rate": 9.969802926589986e-05,
"loss": 0.0121,
"step": 681
},
{
"epoch": 3.2946859903381642,
"grad_norm": 0.060307394713163376,
"learning_rate": 9.969431652953695e-05,
"loss": 0.0089,
"step": 682
},
{
"epoch": 3.2995169082125604,
"grad_norm": 0.07875365763902664,
"learning_rate": 9.969058117831034e-05,
"loss": 0.011,
"step": 683
},
{
"epoch": 3.3043478260869565,
"grad_norm": 0.07338164746761322,
"learning_rate": 9.968682321391986e-05,
"loss": 0.0112,
"step": 684
},
{
"epoch": 3.3091787439613527,
"grad_norm": 0.09912315756082535,
"learning_rate": 9.968304263807574e-05,
"loss": 0.0113,
"step": 685
},
{
"epoch": 3.314009661835749,
"grad_norm": 0.10973814129829407,
"learning_rate": 9.96792394524984e-05,
"loss": 0.0096,
"step": 686
},
{
"epoch": 3.318840579710145,
"grad_norm": 0.06479693949222565,
"learning_rate": 9.967541365891863e-05,
"loss": 0.0109,
"step": 687
},
{
"epoch": 3.323671497584541,
"grad_norm": 0.05435623601078987,
"learning_rate": 9.967156525907743e-05,
"loss": 0.0065,
"step": 688
},
{
"epoch": 3.3285024154589373,
"grad_norm": 0.04574191942811012,
"learning_rate": 9.966769425472616e-05,
"loss": 0.0066,
"step": 689
},
{
"epoch": 3.3333333333333335,
"grad_norm": 0.04818063601851463,
"learning_rate": 9.966380064762642e-05,
"loss": 0.0064,
"step": 690
},
{
"epoch": 3.3381642512077296,
"grad_norm": 0.046589504927396774,
"learning_rate": 9.96598844395501e-05,
"loss": 0.007,
"step": 691
},
{
"epoch": 3.342995169082126,
"grad_norm": 0.05948101729154587,
"learning_rate": 9.96559456322794e-05,
"loss": 0.012,
"step": 692
},
{
"epoch": 3.3478260869565215,
"grad_norm": 0.06270439177751541,
"learning_rate": 9.96519842276068e-05,
"loss": 0.0118,
"step": 693
},
{
"epoch": 3.3526570048309177,
"grad_norm": 0.047514282166957855,
"learning_rate": 9.964800022733504e-05,
"loss": 0.0094,
"step": 694
},
{
"epoch": 3.357487922705314,
"grad_norm": 0.04377506673336029,
"learning_rate": 9.964399363327716e-05,
"loss": 0.0109,
"step": 695
},
{
"epoch": 3.36231884057971,
"grad_norm": 0.04509812965989113,
"learning_rate": 9.963996444725647e-05,
"loss": 0.0112,
"step": 696
},
{
"epoch": 3.367149758454106,
"grad_norm": 0.03941660374403,
"learning_rate": 9.96359126711066e-05,
"loss": 0.0099,
"step": 697
},
{
"epoch": 3.3719806763285023,
"grad_norm": 0.04203685000538826,
"learning_rate": 9.963183830667138e-05,
"loss": 0.0088,
"step": 698
},
{
"epoch": 3.3768115942028984,
"grad_norm": 0.05573079362511635,
"learning_rate": 9.9627741355805e-05,
"loss": 0.0095,
"step": 699
},
{
"epoch": 3.3816425120772946,
"grad_norm": 0.04746829718351364,
"learning_rate": 9.96236218203719e-05,
"loss": 0.0087,
"step": 700
},
{
"epoch": 3.3816425120772946,
"eval_loss": 0.00878093857318163,
"eval_runtime": 20.3171,
"eval_samples_per_second": 4.922,
"eval_steps_per_second": 0.148,
"step": 700
},
{
"epoch": 3.3864734299516908,
"grad_norm": 0.056552231311798096,
"learning_rate": 9.96194797022468e-05,
"loss": 0.0129,
"step": 701
},
{
"epoch": 3.391304347826087,
"grad_norm": 0.0671212300658226,
"learning_rate": 9.961531500331469e-05,
"loss": 0.0108,
"step": 702
},
{
"epoch": 3.396135265700483,
"grad_norm": 0.04948098585009575,
"learning_rate": 9.961112772547083e-05,
"loss": 0.0126,
"step": 703
},
{
"epoch": 3.4009661835748792,
"grad_norm": 0.058139242231845856,
"learning_rate": 9.960691787062076e-05,
"loss": 0.0089,
"step": 704
},
{
"epoch": 3.4057971014492754,
"grad_norm": 0.045286696404218674,
"learning_rate": 9.960268544068032e-05,
"loss": 0.0083,
"step": 705
},
{
"epoch": 3.4106280193236715,
"grad_norm": 0.07364138215780258,
"learning_rate": 9.959843043757557e-05,
"loss": 0.0134,
"step": 706
},
{
"epoch": 3.4154589371980677,
"grad_norm": 0.052311353385448456,
"learning_rate": 9.959415286324289e-05,
"loss": 0.0076,
"step": 707
},
{
"epoch": 3.420289855072464,
"grad_norm": 0.048165835440158844,
"learning_rate": 9.958985271962896e-05,
"loss": 0.0085,
"step": 708
},
{
"epoch": 3.42512077294686,
"grad_norm": 0.039163313806056976,
"learning_rate": 9.958553000869061e-05,
"loss": 0.0084,
"step": 709
},
{
"epoch": 3.429951690821256,
"grad_norm": 0.07590556144714355,
"learning_rate": 9.958118473239507e-05,
"loss": 0.0089,
"step": 710
},
{
"epoch": 3.4347826086956523,
"grad_norm": 0.05805916339159012,
"learning_rate": 9.957681689271977e-05,
"loss": 0.0091,
"step": 711
},
{
"epoch": 3.4396135265700485,
"grad_norm": 0.05946679040789604,
"learning_rate": 9.957242649165241e-05,
"loss": 0.0111,
"step": 712
},
{
"epoch": 3.4444444444444446,
"grad_norm": 0.04727727919816971,
"learning_rate": 9.956801353119099e-05,
"loss": 0.0097,
"step": 713
},
{
"epoch": 3.449275362318841,
"grad_norm": 0.07006451487541199,
"learning_rate": 9.956357801334375e-05,
"loss": 0.0104,
"step": 714
},
{
"epoch": 3.454106280193237,
"grad_norm": 0.04728998243808746,
"learning_rate": 9.955911994012923e-05,
"loss": 0.0103,
"step": 715
},
{
"epoch": 3.4589371980676327,
"grad_norm": 0.03930573910474777,
"learning_rate": 9.955463931357616e-05,
"loss": 0.0087,
"step": 716
},
{
"epoch": 3.463768115942029,
"grad_norm": 0.04594094678759575,
"learning_rate": 9.955013613572362e-05,
"loss": 0.0069,
"step": 717
},
{
"epoch": 3.468599033816425,
"grad_norm": 0.05382443219423294,
"learning_rate": 9.954561040862088e-05,
"loss": 0.008,
"step": 718
},
{
"epoch": 3.473429951690821,
"grad_norm": 0.05387111380696297,
"learning_rate": 9.954106213432755e-05,
"loss": 0.0079,
"step": 719
},
{
"epoch": 3.4782608695652173,
"grad_norm": 0.0623965822160244,
"learning_rate": 9.95364913149134e-05,
"loss": 0.012,
"step": 720
},
{
"epoch": 3.4830917874396135,
"grad_norm": 0.04346054419875145,
"learning_rate": 9.953189795245857e-05,
"loss": 0.0098,
"step": 721
},
{
"epoch": 3.4879227053140096,
"grad_norm": 0.05038152635097504,
"learning_rate": 9.952728204905338e-05,
"loss": 0.0098,
"step": 722
},
{
"epoch": 3.4927536231884058,
"grad_norm": 0.04060295969247818,
"learning_rate": 9.952264360679844e-05,
"loss": 0.0069,
"step": 723
},
{
"epoch": 3.497584541062802,
"grad_norm": 0.04667475074529648,
"learning_rate": 9.951798262780458e-05,
"loss": 0.0098,
"step": 724
},
{
"epoch": 3.502415458937198,
"grad_norm": 0.062279459089040756,
"learning_rate": 9.951329911419298e-05,
"loss": 0.0083,
"step": 725
},
{
"epoch": 3.502415458937198,
"eval_loss": 0.00877303909510374,
"eval_runtime": 20.3131,
"eval_samples_per_second": 4.923,
"eval_steps_per_second": 0.148,
"step": 725
},
{
"epoch": 3.5072463768115942,
"grad_norm": 0.07258505374193192,
"learning_rate": 9.950859306809494e-05,
"loss": 0.0086,
"step": 726
},
{
"epoch": 3.5120772946859904,
"grad_norm": 0.06038128584623337,
"learning_rate": 9.950386449165212e-05,
"loss": 0.0103,
"step": 727
},
{
"epoch": 3.5169082125603865,
"grad_norm": 0.04698696732521057,
"learning_rate": 9.94991133870164e-05,
"loss": 0.0086,
"step": 728
},
{
"epoch": 3.5217391304347827,
"grad_norm": 0.038896944373846054,
"learning_rate": 9.949433975634992e-05,
"loss": 0.0105,
"step": 729
},
{
"epoch": 3.526570048309179,
"grad_norm": 0.05197020620107651,
"learning_rate": 9.948954360182503e-05,
"loss": 0.009,
"step": 730
},
{
"epoch": 3.531400966183575,
"grad_norm": 0.05059540644288063,
"learning_rate": 9.948472492562438e-05,
"loss": 0.0114,
"step": 731
},
{
"epoch": 3.536231884057971,
"grad_norm": 0.04194273799657822,
"learning_rate": 9.947988372994086e-05,
"loss": 0.0106,
"step": 732
},
{
"epoch": 3.541062801932367,
"grad_norm": 0.04211822524666786,
"learning_rate": 9.947502001697757e-05,
"loss": 0.0098,
"step": 733
},
{
"epoch": 3.545893719806763,
"grad_norm": 0.06714488565921783,
"learning_rate": 9.947013378894792e-05,
"loss": 0.0114,
"step": 734
},
{
"epoch": 3.550724637681159,
"grad_norm": 0.038067255169153214,
"learning_rate": 9.946522504807551e-05,
"loss": 0.0087,
"step": 735
},
{
"epoch": 3.5555555555555554,
"grad_norm": 0.03965510427951813,
"learning_rate": 9.94602937965942e-05,
"loss": 0.0087,
"step": 736
},
{
"epoch": 3.5603864734299515,
"grad_norm": 0.05775628611445427,
"learning_rate": 9.945534003674812e-05,
"loss": 0.0095,
"step": 737
},
{
"epoch": 3.5652173913043477,
"grad_norm": 0.06312840431928635,
"learning_rate": 9.945036377079164e-05,
"loss": 0.0121,
"step": 738
},
{
"epoch": 3.570048309178744,
"grad_norm": 0.058142922818660736,
"learning_rate": 9.94453650009893e-05,
"loss": 0.0104,
"step": 739
},
{
"epoch": 3.57487922705314,
"grad_norm": 0.09870404750108719,
"learning_rate": 9.9440343729616e-05,
"loss": 0.0065,
"step": 740
},
{
"epoch": 3.579710144927536,
"grad_norm": 0.07584329694509506,
"learning_rate": 9.943529995895679e-05,
"loss": 0.0097,
"step": 741
},
{
"epoch": 3.5845410628019323,
"grad_norm": 0.09112714231014252,
"learning_rate": 9.943023369130698e-05,
"loss": 0.0115,
"step": 742
},
{
"epoch": 3.5893719806763285,
"grad_norm": 0.032687995582818985,
"learning_rate": 9.942514492897212e-05,
"loss": 0.0072,
"step": 743
},
{
"epoch": 3.5942028985507246,
"grad_norm": 0.047948360443115234,
"learning_rate": 9.942003367426803e-05,
"loss": 0.0084,
"step": 744
},
{
"epoch": 3.5990338164251208,
"grad_norm": 0.05047725886106491,
"learning_rate": 9.941489992952071e-05,
"loss": 0.0082,
"step": 745
},
{
"epoch": 3.603864734299517,
"grad_norm": 0.045492023229599,
"learning_rate": 9.940974369706642e-05,
"loss": 0.0085,
"step": 746
},
{
"epoch": 3.608695652173913,
"grad_norm": 0.042984895408153534,
"learning_rate": 9.940456497925168e-05,
"loss": 0.0082,
"step": 747
},
{
"epoch": 3.6135265700483092,
"grad_norm": 0.06112075597047806,
"learning_rate": 9.939936377843321e-05,
"loss": 0.0097,
"step": 748
},
{
"epoch": 3.6183574879227054,
"grad_norm": 0.06138254702091217,
"learning_rate": 9.939414009697795e-05,
"loss": 0.0073,
"step": 749
},
{
"epoch": 3.6231884057971016,
"grad_norm": 0.05048954114317894,
"learning_rate": 9.938889393726314e-05,
"loss": 0.0088,
"step": 750
},
{
"epoch": 3.6231884057971016,
"eval_loss": 0.007979786954820156,
"eval_runtime": 20.3088,
"eval_samples_per_second": 4.924,
"eval_steps_per_second": 0.148,
"step": 750
},
{
"epoch": 3.6280193236714977,
"grad_norm": 0.05653483420610428,
"learning_rate": 9.938362530167613e-05,
"loss": 0.0061,
"step": 751
},
{
"epoch": 3.632850241545894,
"grad_norm": 0.046065203845500946,
"learning_rate": 9.937833419261462e-05,
"loss": 0.0091,
"step": 752
},
{
"epoch": 3.63768115942029,
"grad_norm": 0.053545523434877396,
"learning_rate": 9.937302061248646e-05,
"loss": 0.008,
"step": 753
},
{
"epoch": 3.642512077294686,
"grad_norm": 0.06943827867507935,
"learning_rate": 9.936768456370977e-05,
"loss": 0.0239,
"step": 754
},
{
"epoch": 3.6473429951690823,
"grad_norm": 0.04048341512680054,
"learning_rate": 9.936232604871285e-05,
"loss": 0.0092,
"step": 755
},
{
"epoch": 3.6521739130434785,
"grad_norm": 0.05726747587323189,
"learning_rate": 9.935694506993427e-05,
"loss": 0.0163,
"step": 756
},
{
"epoch": 3.6570048309178746,
"grad_norm": 0.039764195680618286,
"learning_rate": 9.935154162982281e-05,
"loss": 0.0073,
"step": 757
},
{
"epoch": 3.661835748792271,
"grad_norm": 0.05742327868938446,
"learning_rate": 9.934611573083744e-05,
"loss": 0.0091,
"step": 758
},
{
"epoch": 3.6666666666666665,
"grad_norm": 0.0707569345831871,
"learning_rate": 9.934066737544741e-05,
"loss": 0.009,
"step": 759
},
{
"epoch": 3.6714975845410627,
"grad_norm": 0.04180197790265083,
"learning_rate": 9.93351965661321e-05,
"loss": 0.0109,
"step": 760
},
{
"epoch": 3.676328502415459,
"grad_norm": 0.05421733483672142,
"learning_rate": 9.932970330538123e-05,
"loss": 0.0106,
"step": 761
},
{
"epoch": 3.681159420289855,
"grad_norm": 0.03703097254037857,
"learning_rate": 9.932418759569462e-05,
"loss": 0.0083,
"step": 762
},
{
"epoch": 3.685990338164251,
"grad_norm": 0.04790617153048515,
"learning_rate": 9.931864943958238e-05,
"loss": 0.0092,
"step": 763
},
{
"epoch": 3.6908212560386473,
"grad_norm": 0.057676345109939575,
"learning_rate": 9.931308883956479e-05,
"loss": 0.0085,
"step": 764
},
{
"epoch": 3.6956521739130435,
"grad_norm": 0.03428034111857414,
"learning_rate": 9.930750579817239e-05,
"loss": 0.0066,
"step": 765
},
{
"epoch": 3.7004830917874396,
"grad_norm": 0.05824226886034012,
"learning_rate": 9.93019003179459e-05,
"loss": 0.0113,
"step": 766
},
{
"epoch": 3.7053140096618358,
"grad_norm": 0.032023362815380096,
"learning_rate": 9.929627240143625e-05,
"loss": 0.0073,
"step": 767
},
{
"epoch": 3.710144927536232,
"grad_norm": 0.04681350663304329,
"learning_rate": 9.92906220512046e-05,
"loss": 0.011,
"step": 768
},
{
"epoch": 3.714975845410628,
"grad_norm": 0.05592908337712288,
"learning_rate": 9.92849492698223e-05,
"loss": 0.0105,
"step": 769
},
{
"epoch": 3.7198067632850242,
"grad_norm": 0.042512789368629456,
"learning_rate": 9.927925405987093e-05,
"loss": 0.0094,
"step": 770
},
{
"epoch": 3.7246376811594204,
"grad_norm": 0.04821282997727394,
"learning_rate": 9.927353642394224e-05,
"loss": 0.0094,
"step": 771
},
{
"epoch": 3.7294685990338166,
"grad_norm": 0.05211241543292999,
"learning_rate": 9.926779636463824e-05,
"loss": 0.0116,
"step": 772
},
{
"epoch": 3.7342995169082127,
"grad_norm": 0.04904523119330406,
"learning_rate": 9.926203388457107e-05,
"loss": 0.0119,
"step": 773
},
{
"epoch": 3.7391304347826084,
"grad_norm": 0.061542410403490067,
"learning_rate": 9.925624898636317e-05,
"loss": 0.009,
"step": 774
},
{
"epoch": 3.7439613526570046,
"grad_norm": 0.04940609633922577,
"learning_rate": 9.925044167264708e-05,
"loss": 0.0058,
"step": 775
},
{
"epoch": 3.7439613526570046,
"eval_loss": 0.006909946445375681,
"eval_runtime": 20.3264,
"eval_samples_per_second": 4.92,
"eval_steps_per_second": 0.148,
"step": 775
},
{
"epoch": 3.7487922705314007,
"grad_norm": 0.07155938446521759,
"learning_rate": 9.924461194606561e-05,
"loss": 0.0092,
"step": 776
},
{
"epoch": 3.753623188405797,
"grad_norm": 0.04605531692504883,
"learning_rate": 9.923875980927175e-05,
"loss": 0.0065,
"step": 777
},
{
"epoch": 3.758454106280193,
"grad_norm": 0.0463431291282177,
"learning_rate": 9.923288526492869e-05,
"loss": 0.0062,
"step": 778
},
{
"epoch": 3.763285024154589,
"grad_norm": 0.04100046679377556,
"learning_rate": 9.922698831570982e-05,
"loss": 0.0102,
"step": 779
},
{
"epoch": 3.7681159420289854,
"grad_norm": 0.06026915833353996,
"learning_rate": 9.92210689642987e-05,
"loss": 0.0109,
"step": 780
},
{
"epoch": 3.7729468599033815,
"grad_norm": 0.03291574865579605,
"learning_rate": 9.921512721338912e-05,
"loss": 0.0066,
"step": 781
},
{
"epoch": 3.7777777777777777,
"grad_norm": 0.04719441756606102,
"learning_rate": 9.920916306568504e-05,
"loss": 0.007,
"step": 782
},
{
"epoch": 3.782608695652174,
"grad_norm": 0.06509106606245041,
"learning_rate": 9.920317652390063e-05,
"loss": 0.0092,
"step": 783
},
{
"epoch": 3.78743961352657,
"grad_norm": 0.06969207525253296,
"learning_rate": 9.919716759076025e-05,
"loss": 0.0157,
"step": 784
},
{
"epoch": 3.792270531400966,
"grad_norm": 0.044033829122781754,
"learning_rate": 9.919113626899841e-05,
"loss": 0.0064,
"step": 785
},
{
"epoch": 3.7971014492753623,
"grad_norm": 0.05721135437488556,
"learning_rate": 9.918508256135988e-05,
"loss": 0.0112,
"step": 786
},
{
"epoch": 3.8019323671497585,
"grad_norm": 0.07257521897554398,
"learning_rate": 9.917900647059955e-05,
"loss": 0.0084,
"step": 787
},
{
"epoch": 3.8067632850241546,
"grad_norm": 0.04365171492099762,
"learning_rate": 9.917290799948253e-05,
"loss": 0.008,
"step": 788
},
{
"epoch": 3.8115942028985508,
"grad_norm": 0.05915320664644241,
"learning_rate": 9.916678715078411e-05,
"loss": 0.0075,
"step": 789
},
{
"epoch": 3.816425120772947,
"grad_norm": 0.06364689022302628,
"learning_rate": 9.916064392728979e-05,
"loss": 0.0052,
"step": 790
},
{
"epoch": 3.821256038647343,
"grad_norm": 0.0655299499630928,
"learning_rate": 9.915447833179519e-05,
"loss": 0.0149,
"step": 791
},
{
"epoch": 3.8260869565217392,
"grad_norm": 0.03559907153248787,
"learning_rate": 9.914829036710614e-05,
"loss": 0.0067,
"step": 792
},
{
"epoch": 3.8309178743961354,
"grad_norm": 0.05308472737669945,
"learning_rate": 9.914208003603869e-05,
"loss": 0.0088,
"step": 793
},
{
"epoch": 3.8357487922705316,
"grad_norm": 0.044624749571084976,
"learning_rate": 9.913584734141901e-05,
"loss": 0.0076,
"step": 794
},
{
"epoch": 3.8405797101449277,
"grad_norm": 0.08039918541908264,
"learning_rate": 9.912959228608348e-05,
"loss": 0.0079,
"step": 795
},
{
"epoch": 3.845410628019324,
"grad_norm": 0.055277448147535324,
"learning_rate": 9.912331487287864e-05,
"loss": 0.0134,
"step": 796
},
{
"epoch": 3.85024154589372,
"grad_norm": 0.047117929905653,
"learning_rate": 9.911701510466124e-05,
"loss": 0.0089,
"step": 797
},
{
"epoch": 3.855072463768116,
"grad_norm": 0.04270472377538681,
"learning_rate": 9.911069298429814e-05,
"loss": 0.0085,
"step": 798
},
{
"epoch": 3.8599033816425123,
"grad_norm": 0.060901861637830734,
"learning_rate": 9.910434851466642e-05,
"loss": 0.011,
"step": 799
},
{
"epoch": 3.864734299516908,
"grad_norm": 0.0431562140583992,
"learning_rate": 9.90979816986533e-05,
"loss": 0.008,
"step": 800
},
{
"epoch": 3.864734299516908,
"eval_loss": 0.007014184258878231,
"eval_runtime": 20.3118,
"eval_samples_per_second": 4.923,
"eval_steps_per_second": 0.148,
"step": 800
},
{
"epoch": 3.869565217391304,
"grad_norm": 0.03784063830971718,
"learning_rate": 9.909159253915623e-05,
"loss": 0.009,
"step": 801
},
{
"epoch": 3.8743961352657004,
"grad_norm": 0.046868495643138885,
"learning_rate": 9.908518103908274e-05,
"loss": 0.0112,
"step": 802
},
{
"epoch": 3.8792270531400965,
"grad_norm": 0.034640517085790634,
"learning_rate": 9.907874720135061e-05,
"loss": 0.0082,
"step": 803
},
{
"epoch": 3.8840579710144927,
"grad_norm": 0.03212960809469223,
"learning_rate": 9.907229102888772e-05,
"loss": 0.0073,
"step": 804
},
{
"epoch": 3.888888888888889,
"grad_norm": 0.038106419146060944,
"learning_rate": 9.906581252463216e-05,
"loss": 0.0083,
"step": 805
},
{
"epoch": 3.893719806763285,
"grad_norm": 0.04272741824388504,
"learning_rate": 9.905931169153215e-05,
"loss": 0.0126,
"step": 806
},
{
"epoch": 3.898550724637681,
"grad_norm": 0.05206140875816345,
"learning_rate": 9.905278853254609e-05,
"loss": 0.0095,
"step": 807
},
{
"epoch": 3.9033816425120773,
"grad_norm": 0.04624548926949501,
"learning_rate": 9.904624305064255e-05,
"loss": 0.0088,
"step": 808
},
{
"epoch": 3.9082125603864735,
"grad_norm": 0.04489309340715408,
"learning_rate": 9.903967524880022e-05,
"loss": 0.0075,
"step": 809
},
{
"epoch": 3.9130434782608696,
"grad_norm": 0.05215749889612198,
"learning_rate": 9.903308513000798e-05,
"loss": 0.0128,
"step": 810
},
{
"epoch": 3.917874396135266,
"grad_norm": 0.05710824206471443,
"learning_rate": 9.902647269726489e-05,
"loss": 0.0084,
"step": 811
},
{
"epoch": 3.922705314009662,
"grad_norm": 0.052041713148355484,
"learning_rate": 9.901983795358008e-05,
"loss": 0.0122,
"step": 812
},
{
"epoch": 3.927536231884058,
"grad_norm": 0.043076708912849426,
"learning_rate": 9.901318090197291e-05,
"loss": 0.008,
"step": 813
},
{
"epoch": 3.9323671497584543,
"grad_norm": 0.03354997932910919,
"learning_rate": 9.900650154547286e-05,
"loss": 0.0078,
"step": 814
},
{
"epoch": 3.9371980676328504,
"grad_norm": 0.04225970059633255,
"learning_rate": 9.89997998871196e-05,
"loss": 0.0066,
"step": 815
},
{
"epoch": 3.942028985507246,
"grad_norm": 0.047922004014253616,
"learning_rate": 9.899307592996287e-05,
"loss": 0.0075,
"step": 816
},
{
"epoch": 3.9468599033816423,
"grad_norm": 0.06206347793340683,
"learning_rate": 9.898632967706264e-05,
"loss": 0.0135,
"step": 817
},
{
"epoch": 3.9516908212560384,
"grad_norm": 0.04242934286594391,
"learning_rate": 9.897956113148899e-05,
"loss": 0.0085,
"step": 818
},
{
"epoch": 3.9565217391304346,
"grad_norm": 0.05467364192008972,
"learning_rate": 9.897277029632212e-05,
"loss": 0.0091,
"step": 819
},
{
"epoch": 3.9613526570048307,
"grad_norm": 0.05071014538407326,
"learning_rate": 9.896595717465243e-05,
"loss": 0.0062,
"step": 820
},
{
"epoch": 3.966183574879227,
"grad_norm": 0.05355323106050491,
"learning_rate": 9.89591217695804e-05,
"loss": 0.007,
"step": 821
},
{
"epoch": 3.971014492753623,
"grad_norm": 0.04782043769955635,
"learning_rate": 9.895226408421669e-05,
"loss": 0.0089,
"step": 822
},
{
"epoch": 3.975845410628019,
"grad_norm": 0.07460252195596695,
"learning_rate": 9.894538412168213e-05,
"loss": 0.0076,
"step": 823
},
{
"epoch": 3.9806763285024154,
"grad_norm": 0.0481514111161232,
"learning_rate": 9.89384818851076e-05,
"loss": 0.0115,
"step": 824
},
{
"epoch": 3.9855072463768115,
"grad_norm": 0.045337025076150894,
"learning_rate": 9.893155737763419e-05,
"loss": 0.0099,
"step": 825
},
{
"epoch": 3.9855072463768115,
"eval_loss": 0.007348967250436544,
"eval_runtime": 20.3101,
"eval_samples_per_second": 4.924,
"eval_steps_per_second": 0.148,
"step": 825
},
{
"epoch": 3.9903381642512077,
"grad_norm": 0.07469549030065536,
"learning_rate": 9.892461060241312e-05,
"loss": 0.0115,
"step": 826
},
{
"epoch": 3.995169082125604,
"grad_norm": 0.05278422683477402,
"learning_rate": 9.891764156260568e-05,
"loss": 0.0076,
"step": 827
},
{
"epoch": 4.0,
"grad_norm": 0.09103444963693619,
"learning_rate": 9.891065026138338e-05,
"loss": 0.0118,
"step": 828
},
{
"epoch": 4.004830917874396,
"grad_norm": 0.07645177841186523,
"learning_rate": 9.890363670192776e-05,
"loss": 0.0072,
"step": 829
},
{
"epoch": 4.009661835748792,
"grad_norm": 0.051178693771362305,
"learning_rate": 9.889660088743063e-05,
"loss": 0.0074,
"step": 830
},
{
"epoch": 4.0144927536231885,
"grad_norm": 0.1458490490913391,
"learning_rate": 9.888954282109377e-05,
"loss": 0.0082,
"step": 831
},
{
"epoch": 4.019323671497585,
"grad_norm": 0.06586052477359772,
"learning_rate": 9.888246250612921e-05,
"loss": 0.0073,
"step": 832
},
{
"epoch": 4.024154589371981,
"grad_norm": 0.12695728242397308,
"learning_rate": 9.887535994575902e-05,
"loss": 0.0103,
"step": 833
},
{
"epoch": 4.028985507246377,
"grad_norm": 0.036658771336078644,
"learning_rate": 9.886823514321548e-05,
"loss": 0.0063,
"step": 834
},
{
"epoch": 4.033816425120773,
"grad_norm": 0.0739186480641365,
"learning_rate": 9.886108810174088e-05,
"loss": 0.0096,
"step": 835
},
{
"epoch": 4.038647342995169,
"grad_norm": 0.0768413096666336,
"learning_rate": 9.885391882458773e-05,
"loss": 0.0103,
"step": 836
},
{
"epoch": 4.043478260869565,
"grad_norm": 0.03403015434741974,
"learning_rate": 9.88467273150186e-05,
"loss": 0.008,
"step": 837
},
{
"epoch": 4.048309178743962,
"grad_norm": 0.05291101709008217,
"learning_rate": 9.883951357630622e-05,
"loss": 0.0084,
"step": 838
},
{
"epoch": 4.053140096618358,
"grad_norm": 0.08055385947227478,
"learning_rate": 9.88322776117334e-05,
"loss": 0.0105,
"step": 839
},
{
"epoch": 4.057971014492754,
"grad_norm": 0.04251623898744583,
"learning_rate": 9.882501942459308e-05,
"loss": 0.0091,
"step": 840
},
{
"epoch": 4.06280193236715,
"grad_norm": 0.03910037502646446,
"learning_rate": 9.881773901818832e-05,
"loss": 0.0066,
"step": 841
},
{
"epoch": 4.067632850241546,
"grad_norm": 0.04807236045598984,
"learning_rate": 9.881043639583227e-05,
"loss": 0.0089,
"step": 842
},
{
"epoch": 4.072463768115942,
"grad_norm": 0.030411390587687492,
"learning_rate": 9.880311156084823e-05,
"loss": 0.0048,
"step": 843
},
{
"epoch": 4.0772946859903385,
"grad_norm": 0.03571973741054535,
"learning_rate": 9.879576451656955e-05,
"loss": 0.0085,
"step": 844
},
{
"epoch": 4.082125603864735,
"grad_norm": 0.04853428527712822,
"learning_rate": 9.878839526633974e-05,
"loss": 0.009,
"step": 845
},
{
"epoch": 4.086956521739131,
"grad_norm": 0.04168358072638512,
"learning_rate": 9.878100381351239e-05,
"loss": 0.0096,
"step": 846
},
{
"epoch": 4.091787439613527,
"grad_norm": 0.06781593710184097,
"learning_rate": 9.877359016145117e-05,
"loss": 0.006,
"step": 847
},
{
"epoch": 4.096618357487923,
"grad_norm": 0.06934653222560883,
"learning_rate": 9.876615431352994e-05,
"loss": 0.0075,
"step": 848
},
{
"epoch": 4.101449275362318,
"grad_norm": 0.04383888468146324,
"learning_rate": 9.875869627313255e-05,
"loss": 0.0078,
"step": 849
},
{
"epoch": 4.106280193236715,
"grad_norm": 0.06643518805503845,
"learning_rate": 9.875121604365302e-05,
"loss": 0.0072,
"step": 850
},
{
"epoch": 4.106280193236715,
"eval_loss": 0.011321146041154861,
"eval_runtime": 20.2974,
"eval_samples_per_second": 4.927,
"eval_steps_per_second": 0.148,
"step": 850
},
{
"epoch": 4.111111111111111,
"grad_norm": 0.06936809420585632,
"learning_rate": 9.874371362849548e-05,
"loss": 0.0155,
"step": 851
},
{
"epoch": 4.115942028985507,
"grad_norm": 0.06398960947990417,
"learning_rate": 9.873618903107406e-05,
"loss": 0.0071,
"step": 852
},
{
"epoch": 4.120772946859903,
"grad_norm": 0.05030336230993271,
"learning_rate": 9.87286422548131e-05,
"loss": 0.0103,
"step": 853
},
{
"epoch": 4.125603864734299,
"grad_norm": 0.0524723194539547,
"learning_rate": 9.872107330314696e-05,
"loss": 0.0075,
"step": 854
},
{
"epoch": 4.130434782608695,
"grad_norm": 0.05538473650813103,
"learning_rate": 9.871348217952012e-05,
"loss": 0.0069,
"step": 855
},
{
"epoch": 4.1352657004830915,
"grad_norm": 0.04896744713187218,
"learning_rate": 9.870586888738715e-05,
"loss": 0.007,
"step": 856
},
{
"epoch": 4.140096618357488,
"grad_norm": 0.03770635277032852,
"learning_rate": 9.869823343021271e-05,
"loss": 0.0071,
"step": 857
},
{
"epoch": 4.144927536231884,
"grad_norm": 0.023480841889977455,
"learning_rate": 9.869057581147152e-05,
"loss": 0.005,
"step": 858
},
{
"epoch": 4.14975845410628,
"grad_norm": 0.039378829300403595,
"learning_rate": 9.868289603464842e-05,
"loss": 0.0063,
"step": 859
},
{
"epoch": 4.154589371980676,
"grad_norm": 0.03883344307541847,
"learning_rate": 9.86751941032383e-05,
"loss": 0.0078,
"step": 860
},
{
"epoch": 4.159420289855072,
"grad_norm": 0.03314659744501114,
"learning_rate": 9.866747002074617e-05,
"loss": 0.0069,
"step": 861
},
{
"epoch": 4.164251207729468,
"grad_norm": 0.05089738592505455,
"learning_rate": 9.865972379068711e-05,
"loss": 0.0097,
"step": 862
},
{
"epoch": 4.169082125603865,
"grad_norm": 0.05422361195087433,
"learning_rate": 9.865195541658623e-05,
"loss": 0.0071,
"step": 863
},
{
"epoch": 4.173913043478261,
"grad_norm": 0.0533452071249485,
"learning_rate": 9.86441649019788e-05,
"loss": 0.0099,
"step": 864
},
{
"epoch": 4.178743961352657,
"grad_norm": 0.04176698252558708,
"learning_rate": 9.86363522504101e-05,
"loss": 0.007,
"step": 865
},
{
"epoch": 4.183574879227053,
"grad_norm": 0.034925658255815506,
"learning_rate": 9.862851746543554e-05,
"loss": 0.007,
"step": 866
},
{
"epoch": 4.188405797101449,
"grad_norm": 0.06717666238546371,
"learning_rate": 9.862066055062051e-05,
"loss": 0.0058,
"step": 867
},
{
"epoch": 4.193236714975845,
"grad_norm": 0.05857114866375923,
"learning_rate": 9.861278150954059e-05,
"loss": 0.0057,
"step": 868
},
{
"epoch": 4.1980676328502415,
"grad_norm": 0.07392554730176926,
"learning_rate": 9.860488034578132e-05,
"loss": 0.0081,
"step": 869
},
{
"epoch": 4.202898550724638,
"grad_norm": 0.0408996120095253,
"learning_rate": 9.85969570629384e-05,
"loss": 0.0058,
"step": 870
},
{
"epoch": 4.207729468599034,
"grad_norm": 0.058850813657045364,
"learning_rate": 9.858901166461754e-05,
"loss": 0.0086,
"step": 871
},
{
"epoch": 4.21256038647343,
"grad_norm": 0.04763633757829666,
"learning_rate": 9.85810441544345e-05,
"loss": 0.0049,
"step": 872
},
{
"epoch": 4.217391304347826,
"grad_norm": 0.04741189256310463,
"learning_rate": 9.857305453601517e-05,
"loss": 0.0069,
"step": 873
},
{
"epoch": 4.222222222222222,
"grad_norm": 0.04383055865764618,
"learning_rate": 9.856504281299546e-05,
"loss": 0.0061,
"step": 874
},
{
"epoch": 4.2270531400966185,
"grad_norm": 0.047976478934288025,
"learning_rate": 9.85570089890213e-05,
"loss": 0.0065,
"step": 875
},
{
"epoch": 4.2270531400966185,
"eval_loss": 0.01065537054091692,
"eval_runtime": 20.3101,
"eval_samples_per_second": 4.924,
"eval_steps_per_second": 0.148,
"step": 875
},
{
"epoch": 4.231884057971015,
"grad_norm": 0.060130525380373,
"learning_rate": 9.854895306774876e-05,
"loss": 0.009,
"step": 876
},
{
"epoch": 4.236714975845411,
"grad_norm": 0.03845841810107231,
"learning_rate": 9.854087505284391e-05,
"loss": 0.0053,
"step": 877
},
{
"epoch": 4.241545893719807,
"grad_norm": 0.05535529926419258,
"learning_rate": 9.853277494798287e-05,
"loss": 0.0067,
"step": 878
},
{
"epoch": 4.246376811594203,
"grad_norm": 0.036337871104478836,
"learning_rate": 9.852465275685187e-05,
"loss": 0.0068,
"step": 879
},
{
"epoch": 4.251207729468599,
"grad_norm": 0.05601054057478905,
"learning_rate": 9.851650848314713e-05,
"loss": 0.0136,
"step": 880
},
{
"epoch": 4.256038647342995,
"grad_norm": 0.04372893646359444,
"learning_rate": 9.850834213057494e-05,
"loss": 0.0062,
"step": 881
},
{
"epoch": 4.260869565217392,
"grad_norm": 0.03550177812576294,
"learning_rate": 9.850015370285164e-05,
"loss": 0.0051,
"step": 882
},
{
"epoch": 4.265700483091788,
"grad_norm": 0.04691091179847717,
"learning_rate": 9.84919432037036e-05,
"loss": 0.0082,
"step": 883
},
{
"epoch": 4.270531400966184,
"grad_norm": 0.04239903762936592,
"learning_rate": 9.84837106368673e-05,
"loss": 0.0061,
"step": 884
},
{
"epoch": 4.27536231884058,
"grad_norm": 0.05059480294585228,
"learning_rate": 9.847545600608917e-05,
"loss": 0.0054,
"step": 885
},
{
"epoch": 4.280193236714976,
"grad_norm": 0.03358094394207001,
"learning_rate": 9.846717931512573e-05,
"loss": 0.0081,
"step": 886
},
{
"epoch": 4.285024154589372,
"grad_norm": 0.03535458818078041,
"learning_rate": 9.845888056774354e-05,
"loss": 0.0054,
"step": 887
},
{
"epoch": 4.2898550724637685,
"grad_norm": 0.03758447989821434,
"learning_rate": 9.845055976771919e-05,
"loss": 0.0057,
"step": 888
},
{
"epoch": 4.294685990338165,
"grad_norm": 0.03534655645489693,
"learning_rate": 9.844221691883929e-05,
"loss": 0.0086,
"step": 889
},
{
"epoch": 4.29951690821256,
"grad_norm": 0.05712832883000374,
"learning_rate": 9.843385202490051e-05,
"loss": 0.0086,
"step": 890
},
{
"epoch": 4.304347826086957,
"grad_norm": 0.047709014266729355,
"learning_rate": 9.842546508970955e-05,
"loss": 0.0113,
"step": 891
},
{
"epoch": 4.309178743961352,
"grad_norm": 0.04342706501483917,
"learning_rate": 9.841705611708311e-05,
"loss": 0.009,
"step": 892
},
{
"epoch": 4.314009661835748,
"grad_norm": 0.04739096015691757,
"learning_rate": 9.840862511084798e-05,
"loss": 0.0113,
"step": 893
},
{
"epoch": 4.318840579710145,
"grad_norm": 0.03753174841403961,
"learning_rate": 9.840017207484089e-05,
"loss": 0.0081,
"step": 894
},
{
"epoch": 4.323671497584541,
"grad_norm": 0.06840533018112183,
"learning_rate": 9.839169701290868e-05,
"loss": 0.0084,
"step": 895
},
{
"epoch": 4.328502415458937,
"grad_norm": 0.052864886820316315,
"learning_rate": 9.838319992890816e-05,
"loss": 0.0069,
"step": 896
},
{
"epoch": 4.333333333333333,
"grad_norm": 0.04285022243857384,
"learning_rate": 9.837468082670617e-05,
"loss": 0.0085,
"step": 897
},
{
"epoch": 4.338164251207729,
"grad_norm": 0.029472695663571358,
"learning_rate": 9.836613971017961e-05,
"loss": 0.0057,
"step": 898
},
{
"epoch": 4.342995169082125,
"grad_norm": 0.05671832337975502,
"learning_rate": 9.835757658321533e-05,
"loss": 0.0114,
"step": 899
},
{
"epoch": 4.3478260869565215,
"grad_norm": 0.06673644483089447,
"learning_rate": 9.834899144971025e-05,
"loss": 0.0079,
"step": 900
},
{
"epoch": 4.3478260869565215,
"eval_loss": 0.009663479402661324,
"eval_runtime": 20.3004,
"eval_samples_per_second": 4.926,
"eval_steps_per_second": 0.148,
"step": 900
},
{
"epoch": 4.352657004830918,
"grad_norm": 0.046011269092559814,
"learning_rate": 9.834038431357129e-05,
"loss": 0.0067,
"step": 901
},
{
"epoch": 4.357487922705314,
"grad_norm": 0.05855799466371536,
"learning_rate": 9.833175517871538e-05,
"loss": 0.0062,
"step": 902
},
{
"epoch": 4.36231884057971,
"grad_norm": 0.04528895020484924,
"learning_rate": 9.832310404906946e-05,
"loss": 0.0087,
"step": 903
},
{
"epoch": 4.367149758454106,
"grad_norm": 0.07774829864501953,
"learning_rate": 9.831443092857049e-05,
"loss": 0.0069,
"step": 904
},
{
"epoch": 4.371980676328502,
"grad_norm": 0.06823405623435974,
"learning_rate": 9.830573582116542e-05,
"loss": 0.0146,
"step": 905
},
{
"epoch": 4.3768115942028984,
"grad_norm": 0.05296029895544052,
"learning_rate": 9.829701873081122e-05,
"loss": 0.0094,
"step": 906
},
{
"epoch": 4.381642512077295,
"grad_norm": 0.05371993035078049,
"learning_rate": 9.828827966147485e-05,
"loss": 0.0075,
"step": 907
},
{
"epoch": 4.386473429951691,
"grad_norm": 0.04041058197617531,
"learning_rate": 9.827951861713329e-05,
"loss": 0.0063,
"step": 908
},
{
"epoch": 4.391304347826087,
"grad_norm": 0.04803326725959778,
"learning_rate": 9.827073560177351e-05,
"loss": 0.0068,
"step": 909
},
{
"epoch": 4.396135265700483,
"grad_norm": 0.05994880944490433,
"learning_rate": 9.826193061939249e-05,
"loss": 0.0074,
"step": 910
},
{
"epoch": 4.400966183574879,
"grad_norm": 0.04290630295872688,
"learning_rate": 9.825310367399716e-05,
"loss": 0.0097,
"step": 911
},
{
"epoch": 4.405797101449275,
"grad_norm": 0.0505148209631443,
"learning_rate": 9.824425476960453e-05,
"loss": 0.0069,
"step": 912
},
{
"epoch": 4.4106280193236715,
"grad_norm": 0.04741643741726875,
"learning_rate": 9.823538391024151e-05,
"loss": 0.006,
"step": 913
},
{
"epoch": 4.415458937198068,
"grad_norm": 0.0431365892291069,
"learning_rate": 9.822649109994508e-05,
"loss": 0.0064,
"step": 914
},
{
"epoch": 4.420289855072464,
"grad_norm": 0.05312202498316765,
"learning_rate": 9.821757634276217e-05,
"loss": 0.0074,
"step": 915
},
{
"epoch": 4.42512077294686,
"grad_norm": 0.05456024035811424,
"learning_rate": 9.820863964274969e-05,
"loss": 0.0053,
"step": 916
},
{
"epoch": 4.429951690821256,
"grad_norm": 0.043725527822971344,
"learning_rate": 9.819968100397455e-05,
"loss": 0.008,
"step": 917
},
{
"epoch": 4.434782608695652,
"grad_norm": 0.05771199241280556,
"learning_rate": 9.819070043051366e-05,
"loss": 0.0071,
"step": 918
},
{
"epoch": 4.4396135265700485,
"grad_norm": 0.07217767834663391,
"learning_rate": 9.818169792645388e-05,
"loss": 0.005,
"step": 919
},
{
"epoch": 4.444444444444445,
"grad_norm": 0.14544545114040375,
"learning_rate": 9.817267349589205e-05,
"loss": 0.0075,
"step": 920
},
{
"epoch": 4.449275362318841,
"grad_norm": 0.034144334495067596,
"learning_rate": 9.816362714293504e-05,
"loss": 0.0056,
"step": 921
},
{
"epoch": 4.454106280193237,
"grad_norm": 0.04955586418509483,
"learning_rate": 9.815455887169965e-05,
"loss": 0.006,
"step": 922
},
{
"epoch": 4.458937198067633,
"grad_norm": 0.04086367413401604,
"learning_rate": 9.814546868631264e-05,
"loss": 0.0063,
"step": 923
},
{
"epoch": 4.463768115942029,
"grad_norm": 0.03800726681947708,
"learning_rate": 9.813635659091078e-05,
"loss": 0.007,
"step": 924
},
{
"epoch": 4.468599033816425,
"grad_norm": 0.06027813255786896,
"learning_rate": 9.81272225896408e-05,
"loss": 0.0081,
"step": 925
},
{
"epoch": 4.468599033816425,
"eval_loss": 0.010265327990055084,
"eval_runtime": 20.306,
"eval_samples_per_second": 4.925,
"eval_steps_per_second": 0.148,
"step": 925
},
{
"epoch": 4.468599033816425,
"step": 925,
"total_flos": 3.488842745919701e+18,
"train_loss": 0.025700105609926017,
"train_runtime": 24639.0069,
"train_samples_per_second": 10.045,
"train_steps_per_second": 0.21
}
],
"logging_steps": 1,
"max_steps": 5175,
"num_input_tokens_seen": 0,
"num_train_epochs": 25,
"save_steps": 100,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 4
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.488842745919701e+18,
"train_batch_size": 48,
"trial_name": null,
"trial_params": null
}