Training in progress, step 500, checkpoint
ab0a3a5
{
"best_metric": 1.7225215435028076,
"best_model_checkpoint": "miner_id_24/checkpoint-500",
"epoch": 0.09961647656522389,
"eval_steps": 100,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00019923295313044778,
"grad_norm": 1.2004849910736084,
"learning_rate": 5e-06,
"loss": 2.9446,
"step": 1
},
{
"epoch": 0.00019923295313044778,
"eval_loss": 4.056490898132324,
"eval_runtime": 1316.2296,
"eval_samples_per_second": 6.423,
"eval_steps_per_second": 1.606,
"step": 1
},
{
"epoch": 0.00039846590626089557,
"grad_norm": 1.6586755514144897,
"learning_rate": 1e-05,
"loss": 3.5137,
"step": 2
},
{
"epoch": 0.0005976988593913434,
"grad_norm": 1.1276801824569702,
"learning_rate": 1.5e-05,
"loss": 2.5317,
"step": 3
},
{
"epoch": 0.0007969318125217911,
"grad_norm": 1.4570177793502808,
"learning_rate": 2e-05,
"loss": 3.2148,
"step": 4
},
{
"epoch": 0.0009961647656522388,
"grad_norm": 1.695500135421753,
"learning_rate": 2.5e-05,
"loss": 3.2779,
"step": 5
},
{
"epoch": 0.0011953977187826868,
"grad_norm": 1.5533411502838135,
"learning_rate": 3e-05,
"loss": 3.4182,
"step": 6
},
{
"epoch": 0.0013946306719131345,
"grad_norm": 1.5567388534545898,
"learning_rate": 3.5e-05,
"loss": 3.0221,
"step": 7
},
{
"epoch": 0.0015938636250435823,
"grad_norm": 1.6326138973236084,
"learning_rate": 4e-05,
"loss": 3.1334,
"step": 8
},
{
"epoch": 0.00179309657817403,
"grad_norm": 1.7711933851242065,
"learning_rate": 4.5e-05,
"loss": 3.0372,
"step": 9
},
{
"epoch": 0.0019923295313044776,
"grad_norm": 1.850273609161377,
"learning_rate": 5e-05,
"loss": 2.891,
"step": 10
},
{
"epoch": 0.0021915624844349255,
"grad_norm": 1.782905101776123,
"learning_rate": 5.500000000000001e-05,
"loss": 2.7233,
"step": 11
},
{
"epoch": 0.0023907954375653735,
"grad_norm": 1.5795241594314575,
"learning_rate": 6e-05,
"loss": 2.9004,
"step": 12
},
{
"epoch": 0.002590028390695821,
"grad_norm": 1.513824224472046,
"learning_rate": 6.500000000000001e-05,
"loss": 2.299,
"step": 13
},
{
"epoch": 0.002789261343826269,
"grad_norm": 1.6560556888580322,
"learning_rate": 7e-05,
"loss": 2.1206,
"step": 14
},
{
"epoch": 0.0029884942969567166,
"grad_norm": 1.761472463607788,
"learning_rate": 7.500000000000001e-05,
"loss": 2.3681,
"step": 15
},
{
"epoch": 0.0031877272500871645,
"grad_norm": 2.254106044769287,
"learning_rate": 8e-05,
"loss": 2.3073,
"step": 16
},
{
"epoch": 0.003386960203217612,
"grad_norm": 1.6817950010299683,
"learning_rate": 8.5e-05,
"loss": 2.1039,
"step": 17
},
{
"epoch": 0.00358619315634806,
"grad_norm": 1.5102214813232422,
"learning_rate": 9e-05,
"loss": 2.1165,
"step": 18
},
{
"epoch": 0.0037854261094785076,
"grad_norm": 1.2089601755142212,
"learning_rate": 9.5e-05,
"loss": 2.0751,
"step": 19
},
{
"epoch": 0.003984659062608955,
"grad_norm": 1.210183024406433,
"learning_rate": 0.0001,
"loss": 1.8907,
"step": 20
},
{
"epoch": 0.0041838920157394035,
"grad_norm": 1.2759207487106323,
"learning_rate": 9.999892908320647e-05,
"loss": 1.8462,
"step": 21
},
{
"epoch": 0.004383124968869851,
"grad_norm": 3.2929604053497314,
"learning_rate": 9.999571637870036e-05,
"loss": 1.864,
"step": 22
},
{
"epoch": 0.004582357922000299,
"grad_norm": 1.1962004899978638,
"learning_rate": 9.999036202410325e-05,
"loss": 1.7009,
"step": 23
},
{
"epoch": 0.004781590875130747,
"grad_norm": 1.1904807090759277,
"learning_rate": 9.998286624877786e-05,
"loss": 2.0888,
"step": 24
},
{
"epoch": 0.0049808238282611945,
"grad_norm": 3.4276819229125977,
"learning_rate": 9.997322937381829e-05,
"loss": 1.858,
"step": 25
},
{
"epoch": 0.005180056781391642,
"grad_norm": 1.3617855310440063,
"learning_rate": 9.996145181203615e-05,
"loss": 2.1621,
"step": 26
},
{
"epoch": 0.00537928973452209,
"grad_norm": 1.9927242994308472,
"learning_rate": 9.994753406794301e-05,
"loss": 1.8291,
"step": 27
},
{
"epoch": 0.005578522687652538,
"grad_norm": 1.01543128490448,
"learning_rate": 9.99314767377287e-05,
"loss": 1.9078,
"step": 28
},
{
"epoch": 0.005777755640782986,
"grad_norm": 1.1790841817855835,
"learning_rate": 9.991328050923581e-05,
"loss": 1.8481,
"step": 29
},
{
"epoch": 0.005976988593913433,
"grad_norm": 1.0684229135513306,
"learning_rate": 9.989294616193017e-05,
"loss": 1.9119,
"step": 30
},
{
"epoch": 0.006176221547043881,
"grad_norm": 1.042448878288269,
"learning_rate": 9.98704745668676e-05,
"loss": 1.8315,
"step": 31
},
{
"epoch": 0.006375454500174329,
"grad_norm": 1.063533902168274,
"learning_rate": 9.98458666866564e-05,
"loss": 1.9271,
"step": 32
},
{
"epoch": 0.006574687453304777,
"grad_norm": 0.9447396993637085,
"learning_rate": 9.981912357541627e-05,
"loss": 1.9347,
"step": 33
},
{
"epoch": 0.006773920406435224,
"grad_norm": 1.0030559301376343,
"learning_rate": 9.97902463787331e-05,
"loss": 2.1377,
"step": 34
},
{
"epoch": 0.0069731533595656725,
"grad_norm": 0.9730034470558167,
"learning_rate": 9.975923633360985e-05,
"loss": 1.869,
"step": 35
},
{
"epoch": 0.00717238631269612,
"grad_norm": 0.9906373023986816,
"learning_rate": 9.972609476841367e-05,
"loss": 2.0613,
"step": 36
},
{
"epoch": 0.007371619265826568,
"grad_norm": 1.1251929998397827,
"learning_rate": 9.969082310281891e-05,
"loss": 2.3343,
"step": 37
},
{
"epoch": 0.007570852218957015,
"grad_norm": 1.1643787622451782,
"learning_rate": 9.965342284774632e-05,
"loss": 2.0388,
"step": 38
},
{
"epoch": 0.0077700851720874636,
"grad_norm": 1.09977388381958,
"learning_rate": 9.961389560529836e-05,
"loss": 2.2025,
"step": 39
},
{
"epoch": 0.00796931812521791,
"grad_norm": 1.0421956777572632,
"learning_rate": 9.957224306869053e-05,
"loss": 1.9667,
"step": 40
},
{
"epoch": 0.008168551078348359,
"grad_norm": 1.134215235710144,
"learning_rate": 9.952846702217886e-05,
"loss": 2.1981,
"step": 41
},
{
"epoch": 0.008367784031478807,
"grad_norm": 1.1163185834884644,
"learning_rate": 9.948256934098352e-05,
"loss": 2.1145,
"step": 42
},
{
"epoch": 0.008567016984609254,
"grad_norm": 1.0088622570037842,
"learning_rate": 9.943455199120837e-05,
"loss": 2.0563,
"step": 43
},
{
"epoch": 0.008766249937739702,
"grad_norm": 1.0823618173599243,
"learning_rate": 9.938441702975689e-05,
"loss": 1.9748,
"step": 44
},
{
"epoch": 0.00896548289087015,
"grad_norm": 1.0469329357147217,
"learning_rate": 9.933216660424395e-05,
"loss": 2.1169,
"step": 45
},
{
"epoch": 0.009164715844000597,
"grad_norm": 1.0795198678970337,
"learning_rate": 9.927780295290389e-05,
"loss": 2.0546,
"step": 46
},
{
"epoch": 0.009363948797131046,
"grad_norm": 1.2332130670547485,
"learning_rate": 9.922132840449459e-05,
"loss": 2.3422,
"step": 47
},
{
"epoch": 0.009563181750261494,
"grad_norm": 1.0598793029785156,
"learning_rate": 9.916274537819775e-05,
"loss": 2.0388,
"step": 48
},
{
"epoch": 0.00976241470339194,
"grad_norm": 1.042548418045044,
"learning_rate": 9.91020563835152e-05,
"loss": 2.0313,
"step": 49
},
{
"epoch": 0.009961647656522389,
"grad_norm": 1.0272207260131836,
"learning_rate": 9.903926402016153e-05,
"loss": 2.1789,
"step": 50
},
{
"epoch": 0.010160880609652836,
"grad_norm": 1.0316256284713745,
"learning_rate": 9.897437097795257e-05,
"loss": 1.6957,
"step": 51
},
{
"epoch": 0.010360113562783284,
"grad_norm": 0.9458410739898682,
"learning_rate": 9.890738003669029e-05,
"loss": 1.5179,
"step": 52
},
{
"epoch": 0.010559346515913733,
"grad_norm": 0.9019896388053894,
"learning_rate": 9.883829406604363e-05,
"loss": 1.8115,
"step": 53
},
{
"epoch": 0.01075857946904418,
"grad_norm": 0.7449951767921448,
"learning_rate": 9.876711602542563e-05,
"loss": 1.4849,
"step": 54
},
{
"epoch": 0.010957812422174628,
"grad_norm": 0.690968930721283,
"learning_rate": 9.869384896386668e-05,
"loss": 1.4004,
"step": 55
},
{
"epoch": 0.011157045375305076,
"grad_norm": 0.6796329021453857,
"learning_rate": 9.861849601988383e-05,
"loss": 1.1753,
"step": 56
},
{
"epoch": 0.011356278328435523,
"grad_norm": 0.6940993070602417,
"learning_rate": 9.854106042134641e-05,
"loss": 1.3674,
"step": 57
},
{
"epoch": 0.011555511281565971,
"grad_norm": 0.7557864189147949,
"learning_rate": 9.846154548533773e-05,
"loss": 1.4927,
"step": 58
},
{
"epoch": 0.01175474423469642,
"grad_norm": 0.7977707982063293,
"learning_rate": 9.837995461801299e-05,
"loss": 1.4762,
"step": 59
},
{
"epoch": 0.011953977187826866,
"grad_norm": 0.889670193195343,
"learning_rate": 9.829629131445342e-05,
"loss": 1.7018,
"step": 60
},
{
"epoch": 0.012153210140957315,
"grad_norm": 0.8150140047073364,
"learning_rate": 9.821055915851647e-05,
"loss": 1.5165,
"step": 61
},
{
"epoch": 0.012352443094087761,
"grad_norm": 0.8231312036514282,
"learning_rate": 9.812276182268236e-05,
"loss": 1.8683,
"step": 62
},
{
"epoch": 0.01255167604721821,
"grad_norm": 0.7673140168190002,
"learning_rate": 9.803290306789676e-05,
"loss": 1.483,
"step": 63
},
{
"epoch": 0.012750909000348658,
"grad_norm": 0.8216698169708252,
"learning_rate": 9.794098674340965e-05,
"loss": 1.6038,
"step": 64
},
{
"epoch": 0.012950141953479105,
"grad_norm": 0.7474381327629089,
"learning_rate": 9.784701678661045e-05,
"loss": 1.4957,
"step": 65
},
{
"epoch": 0.013149374906609553,
"grad_norm": 0.7596239447593689,
"learning_rate": 9.775099722285935e-05,
"loss": 1.7015,
"step": 66
},
{
"epoch": 0.013348607859740002,
"grad_norm": 0.6952986717224121,
"learning_rate": 9.765293216531486e-05,
"loss": 1.5342,
"step": 67
},
{
"epoch": 0.013547840812870448,
"grad_norm": 0.9067381024360657,
"learning_rate": 9.755282581475769e-05,
"loss": 1.8279,
"step": 68
},
{
"epoch": 0.013747073766000897,
"grad_norm": 0.8189777135848999,
"learning_rate": 9.74506824594107e-05,
"loss": 1.5812,
"step": 69
},
{
"epoch": 0.013946306719131345,
"grad_norm": 0.8380293846130371,
"learning_rate": 9.73465064747553e-05,
"loss": 1.7342,
"step": 70
},
{
"epoch": 0.014145539672261792,
"grad_norm": 0.8201037645339966,
"learning_rate": 9.724030232334391e-05,
"loss": 1.6843,
"step": 71
},
{
"epoch": 0.01434477262539224,
"grad_norm": 0.971545398235321,
"learning_rate": 9.713207455460894e-05,
"loss": 1.7146,
"step": 72
},
{
"epoch": 0.014544005578522687,
"grad_norm": 1.0521457195281982,
"learning_rate": 9.702182780466775e-05,
"loss": 1.8588,
"step": 73
},
{
"epoch": 0.014743238531653135,
"grad_norm": 0.8414843082427979,
"learning_rate": 9.690956679612421e-05,
"loss": 1.7867,
"step": 74
},
{
"epoch": 0.014942471484783584,
"grad_norm": 0.9837526082992554,
"learning_rate": 9.67952963378663e-05,
"loss": 1.8164,
"step": 75
},
{
"epoch": 0.01514170443791403,
"grad_norm": 0.7853424549102783,
"learning_rate": 9.667902132486009e-05,
"loss": 1.5892,
"step": 76
},
{
"epoch": 0.015340937391044479,
"grad_norm": 0.8817009925842285,
"learning_rate": 9.656074673794018e-05,
"loss": 1.6805,
"step": 77
},
{
"epoch": 0.015540170344174927,
"grad_norm": 0.8272745609283447,
"learning_rate": 9.644047764359622e-05,
"loss": 1.6335,
"step": 78
},
{
"epoch": 0.015739403297305374,
"grad_norm": 1.0323444604873657,
"learning_rate": 9.631821919375591e-05,
"loss": 1.9216,
"step": 79
},
{
"epoch": 0.01593863625043582,
"grad_norm": 0.7793384790420532,
"learning_rate": 9.619397662556435e-05,
"loss": 1.6618,
"step": 80
},
{
"epoch": 0.01613786920356627,
"grad_norm": 0.8447017073631287,
"learning_rate": 9.606775526115963e-05,
"loss": 1.934,
"step": 81
},
{
"epoch": 0.016337102156696717,
"grad_norm": 0.7522584199905396,
"learning_rate": 9.593956050744492e-05,
"loss": 1.8898,
"step": 82
},
{
"epoch": 0.016536335109827164,
"grad_norm": 0.971231997013092,
"learning_rate": 9.580939785585681e-05,
"loss": 1.9143,
"step": 83
},
{
"epoch": 0.016735568062957614,
"grad_norm": 0.8041723966598511,
"learning_rate": 9.567727288213005e-05,
"loss": 1.7362,
"step": 84
},
{
"epoch": 0.01693480101608806,
"grad_norm": 0.8742603659629822,
"learning_rate": 9.554319124605879e-05,
"loss": 2.0349,
"step": 85
},
{
"epoch": 0.017134033969218507,
"grad_norm": 0.8700699210166931,
"learning_rate": 9.540715869125407e-05,
"loss": 1.7798,
"step": 86
},
{
"epoch": 0.017333266922348958,
"grad_norm": 1.0314463376998901,
"learning_rate": 9.526918104489777e-05,
"loss": 1.9499,
"step": 87
},
{
"epoch": 0.017532499875479404,
"grad_norm": 0.9694502949714661,
"learning_rate": 9.512926421749304e-05,
"loss": 2.1316,
"step": 88
},
{
"epoch": 0.01773173282860985,
"grad_norm": 0.9032630920410156,
"learning_rate": 9.498741420261108e-05,
"loss": 2.1217,
"step": 89
},
{
"epoch": 0.0179309657817403,
"grad_norm": 0.9292871356010437,
"learning_rate": 9.484363707663442e-05,
"loss": 1.878,
"step": 90
},
{
"epoch": 0.018130198734870748,
"grad_norm": 0.9102398157119751,
"learning_rate": 9.469793899849661e-05,
"loss": 1.8715,
"step": 91
},
{
"epoch": 0.018329431688001194,
"grad_norm": 1.0135326385498047,
"learning_rate": 9.45503262094184e-05,
"loss": 2.2684,
"step": 92
},
{
"epoch": 0.018528664641131645,
"grad_norm": 0.9862831830978394,
"learning_rate": 9.440080503264037e-05,
"loss": 2.1933,
"step": 93
},
{
"epoch": 0.01872789759426209,
"grad_norm": 0.8928501009941101,
"learning_rate": 9.42493818731521e-05,
"loss": 1.9306,
"step": 94
},
{
"epoch": 0.018927130547392538,
"grad_norm": 1.0035126209259033,
"learning_rate": 9.409606321741775e-05,
"loss": 2.1338,
"step": 95
},
{
"epoch": 0.019126363500522988,
"grad_norm": 1.0360318422317505,
"learning_rate": 9.394085563309827e-05,
"loss": 2.0244,
"step": 96
},
{
"epoch": 0.019325596453653435,
"grad_norm": 1.0765694379806519,
"learning_rate": 9.378376576876999e-05,
"loss": 2.0452,
"step": 97
},
{
"epoch": 0.01952482940678388,
"grad_norm": 1.0682713985443115,
"learning_rate": 9.362480035363986e-05,
"loss": 1.9638,
"step": 98
},
{
"epoch": 0.01972406235991433,
"grad_norm": 0.9798291325569153,
"learning_rate": 9.34639661972572e-05,
"loss": 1.8321,
"step": 99
},
{
"epoch": 0.019923295313044778,
"grad_norm": 1.2100462913513184,
"learning_rate": 9.330127018922194e-05,
"loss": 2.1887,
"step": 100
},
{
"epoch": 0.019923295313044778,
"eval_loss": 1.8207975625991821,
"eval_runtime": 1307.2968,
"eval_samples_per_second": 6.467,
"eval_steps_per_second": 1.617,
"step": 100
},
{
"epoch": 0.020122528266175225,
"grad_norm": 0.8175336718559265,
"learning_rate": 9.31367192988896e-05,
"loss": 1.3173,
"step": 101
},
{
"epoch": 0.02032176121930567,
"grad_norm": 0.9728506803512573,
"learning_rate": 9.297032057507264e-05,
"loss": 1.8635,
"step": 102
},
{
"epoch": 0.02052099417243612,
"grad_norm": 0.7298185229301453,
"learning_rate": 9.280208114573859e-05,
"loss": 1.5464,
"step": 103
},
{
"epoch": 0.02072022712556657,
"grad_norm": 0.7254154086112976,
"learning_rate": 9.263200821770461e-05,
"loss": 1.3951,
"step": 104
},
{
"epoch": 0.020919460078697015,
"grad_norm": 0.8585533499717712,
"learning_rate": 9.246010907632895e-05,
"loss": 1.9375,
"step": 105
},
{
"epoch": 0.021118693031827465,
"grad_norm": 0.672656774520874,
"learning_rate": 9.228639108519868e-05,
"loss": 1.7358,
"step": 106
},
{
"epoch": 0.021317925984957912,
"grad_norm": 0.6700272560119629,
"learning_rate": 9.211086168581433e-05,
"loss": 1.373,
"step": 107
},
{
"epoch": 0.02151715893808836,
"grad_norm": 0.6741132736206055,
"learning_rate": 9.193352839727121e-05,
"loss": 1.472,
"step": 108
},
{
"epoch": 0.02171639189121881,
"grad_norm": 0.7094087600708008,
"learning_rate": 9.175439881593716e-05,
"loss": 1.4418,
"step": 109
},
{
"epoch": 0.021915624844349255,
"grad_norm": 0.6957412958145142,
"learning_rate": 9.157348061512727e-05,
"loss": 1.3553,
"step": 110
},
{
"epoch": 0.022114857797479702,
"grad_norm": 0.81883704662323,
"learning_rate": 9.139078154477512e-05,
"loss": 1.4875,
"step": 111
},
{
"epoch": 0.022314090750610152,
"grad_norm": 0.6015892624855042,
"learning_rate": 9.120630943110077e-05,
"loss": 1.3376,
"step": 112
},
{
"epoch": 0.0225133237037406,
"grad_norm": 0.7218628525733948,
"learning_rate": 9.102007217627568e-05,
"loss": 1.5389,
"step": 113
},
{
"epoch": 0.022712556656871045,
"grad_norm": 0.6947839260101318,
"learning_rate": 9.083207775808396e-05,
"loss": 1.6291,
"step": 114
},
{
"epoch": 0.022911789610001496,
"grad_norm": 0.8209601044654846,
"learning_rate": 9.064233422958077e-05,
"loss": 1.5964,
"step": 115
},
{
"epoch": 0.023111022563131942,
"grad_norm": 0.7330240607261658,
"learning_rate": 9.045084971874738e-05,
"loss": 1.607,
"step": 116
},
{
"epoch": 0.02331025551626239,
"grad_norm": 0.6640767455101013,
"learning_rate": 9.025763242814291e-05,
"loss": 1.4805,
"step": 117
},
{
"epoch": 0.02350948846939284,
"grad_norm": 0.6990946531295776,
"learning_rate": 9.006269063455304e-05,
"loss": 1.3911,
"step": 118
},
{
"epoch": 0.023708721422523286,
"grad_norm": 0.6661984324455261,
"learning_rate": 8.986603268863536e-05,
"loss": 1.4391,
"step": 119
},
{
"epoch": 0.023907954375653732,
"grad_norm": 0.7727532386779785,
"learning_rate": 8.966766701456177e-05,
"loss": 1.6384,
"step": 120
},
{
"epoch": 0.024107187328784183,
"grad_norm": 0.8111420273780823,
"learning_rate": 8.94676021096575e-05,
"loss": 1.856,
"step": 121
},
{
"epoch": 0.02430642028191463,
"grad_norm": 0.7776418924331665,
"learning_rate": 8.926584654403724e-05,
"loss": 1.5784,
"step": 122
},
{
"epoch": 0.024505653235045076,
"grad_norm": 0.8065217733383179,
"learning_rate": 8.906240896023794e-05,
"loss": 1.6247,
"step": 123
},
{
"epoch": 0.024704886188175523,
"grad_norm": 0.7631449103355408,
"learning_rate": 8.885729807284856e-05,
"loss": 1.658,
"step": 124
},
{
"epoch": 0.024904119141305973,
"grad_norm": 0.8057156801223755,
"learning_rate": 8.865052266813685e-05,
"loss": 1.7705,
"step": 125
},
{
"epoch": 0.02510335209443642,
"grad_norm": 0.8324928879737854,
"learning_rate": 8.844209160367299e-05,
"loss": 1.7019,
"step": 126
},
{
"epoch": 0.025302585047566866,
"grad_norm": 0.9295965433120728,
"learning_rate": 8.823201380795001e-05,
"loss": 1.9604,
"step": 127
},
{
"epoch": 0.025501818000697316,
"grad_norm": 0.80018550157547,
"learning_rate": 8.802029828000156e-05,
"loss": 1.6104,
"step": 128
},
{
"epoch": 0.025701050953827763,
"grad_norm": 0.8858182430267334,
"learning_rate": 8.780695408901613e-05,
"loss": 1.805,
"step": 129
},
{
"epoch": 0.02590028390695821,
"grad_norm": 0.6917589902877808,
"learning_rate": 8.759199037394887e-05,
"loss": 1.5116,
"step": 130
},
{
"epoch": 0.02609951686008866,
"grad_norm": 0.8768478631973267,
"learning_rate": 8.737541634312985e-05,
"loss": 1.9037,
"step": 131
},
{
"epoch": 0.026298749813219106,
"grad_norm": 0.7829615473747253,
"learning_rate": 8.715724127386972e-05,
"loss": 1.7264,
"step": 132
},
{
"epoch": 0.026497982766349553,
"grad_norm": 0.7925187349319458,
"learning_rate": 8.693747451206232e-05,
"loss": 1.7334,
"step": 133
},
{
"epoch": 0.026697215719480003,
"grad_norm": 0.821109414100647,
"learning_rate": 8.671612547178428e-05,
"loss": 1.8394,
"step": 134
},
{
"epoch": 0.02689644867261045,
"grad_norm": 0.8298285007476807,
"learning_rate": 8.649320363489179e-05,
"loss": 1.8602,
"step": 135
},
{
"epoch": 0.027095681625740897,
"grad_norm": 0.8185926079750061,
"learning_rate": 8.626871855061438e-05,
"loss": 1.8013,
"step": 136
},
{
"epoch": 0.027294914578871347,
"grad_norm": 0.9280014038085938,
"learning_rate": 8.604267983514594e-05,
"loss": 1.9981,
"step": 137
},
{
"epoch": 0.027494147532001793,
"grad_norm": 0.9310111403465271,
"learning_rate": 8.581509717123273e-05,
"loss": 2.0068,
"step": 138
},
{
"epoch": 0.02769338048513224,
"grad_norm": 0.907690703868866,
"learning_rate": 8.558598030775857e-05,
"loss": 1.9491,
"step": 139
},
{
"epoch": 0.02789261343826269,
"grad_norm": 0.9870088696479797,
"learning_rate": 8.535533905932738e-05,
"loss": 2.1852,
"step": 140
},
{
"epoch": 0.028091846391393137,
"grad_norm": 0.8817322850227356,
"learning_rate": 8.51231833058426e-05,
"loss": 2.049,
"step": 141
},
{
"epoch": 0.028291079344523583,
"grad_norm": 1.048509955406189,
"learning_rate": 8.488952299208401e-05,
"loss": 1.9654,
"step": 142
},
{
"epoch": 0.028490312297654034,
"grad_norm": 0.8694697618484497,
"learning_rate": 8.46543681272818e-05,
"loss": 1.9318,
"step": 143
},
{
"epoch": 0.02868954525078448,
"grad_norm": 0.8193637728691101,
"learning_rate": 8.44177287846877e-05,
"loss": 1.6541,
"step": 144
},
{
"epoch": 0.028888778203914927,
"grad_norm": 1.0921413898468018,
"learning_rate": 8.417961510114356e-05,
"loss": 2.1767,
"step": 145
},
{
"epoch": 0.029088011157045374,
"grad_norm": 1.0805463790893555,
"learning_rate": 8.39400372766471e-05,
"loss": 2.2352,
"step": 146
},
{
"epoch": 0.029287244110175824,
"grad_norm": 1.1638261079788208,
"learning_rate": 8.36990055739149e-05,
"loss": 2.2381,
"step": 147
},
{
"epoch": 0.02948647706330627,
"grad_norm": 1.0771574974060059,
"learning_rate": 8.345653031794292e-05,
"loss": 2.2466,
"step": 148
},
{
"epoch": 0.029685710016436717,
"grad_norm": 0.8697289228439331,
"learning_rate": 8.321262189556409e-05,
"loss": 1.8392,
"step": 149
},
{
"epoch": 0.029884942969567167,
"grad_norm": 1.0429342985153198,
"learning_rate": 8.296729075500344e-05,
"loss": 2.0501,
"step": 150
},
{
"epoch": 0.030084175922697614,
"grad_norm": 0.8660073280334473,
"learning_rate": 8.272054740543052e-05,
"loss": 1.5289,
"step": 151
},
{
"epoch": 0.03028340887582806,
"grad_norm": 0.7414923310279846,
"learning_rate": 8.247240241650918e-05,
"loss": 1.448,
"step": 152
},
{
"epoch": 0.03048264182895851,
"grad_norm": 0.7929139733314514,
"learning_rate": 8.222286641794488e-05,
"loss": 1.6223,
"step": 153
},
{
"epoch": 0.030681874782088957,
"grad_norm": 0.9731504321098328,
"learning_rate": 8.197195009902924e-05,
"loss": 1.6532,
"step": 154
},
{
"epoch": 0.030881107735219404,
"grad_norm": 0.6364417672157288,
"learning_rate": 8.171966420818228e-05,
"loss": 1.2613,
"step": 155
},
{
"epoch": 0.031080340688349854,
"grad_norm": 0.7185880541801453,
"learning_rate": 8.146601955249188e-05,
"loss": 1.5937,
"step": 156
},
{
"epoch": 0.0312795736414803,
"grad_norm": 0.6489309072494507,
"learning_rate": 8.121102699725089e-05,
"loss": 1.4197,
"step": 157
},
{
"epoch": 0.03147880659461075,
"grad_norm": 0.800514817237854,
"learning_rate": 8.095469746549172e-05,
"loss": 1.6741,
"step": 158
},
{
"epoch": 0.0316780395477412,
"grad_norm": 0.7517793774604797,
"learning_rate": 8.069704193751832e-05,
"loss": 1.554,
"step": 159
},
{
"epoch": 0.03187727250087164,
"grad_norm": 0.7281696200370789,
"learning_rate": 8.043807145043604e-05,
"loss": 1.5014,
"step": 160
},
{
"epoch": 0.03207650545400209,
"grad_norm": 0.7210339307785034,
"learning_rate": 8.017779709767858e-05,
"loss": 1.4519,
"step": 161
},
{
"epoch": 0.03227573840713254,
"grad_norm": 0.8253017663955688,
"learning_rate": 7.991623002853296e-05,
"loss": 1.5572,
"step": 162
},
{
"epoch": 0.032474971360262984,
"grad_norm": 0.7311911582946777,
"learning_rate": 7.965338144766186e-05,
"loss": 1.6551,
"step": 163
},
{
"epoch": 0.032674204313393435,
"grad_norm": 0.6998488306999207,
"learning_rate": 7.938926261462366e-05,
"loss": 1.5228,
"step": 164
},
{
"epoch": 0.032873437266523885,
"grad_norm": 0.8242675065994263,
"learning_rate": 7.912388484339012e-05,
"loss": 1.6667,
"step": 165
},
{
"epoch": 0.03307267021965433,
"grad_norm": 0.8666072487831116,
"learning_rate": 7.88572595018617e-05,
"loss": 1.6337,
"step": 166
},
{
"epoch": 0.03327190317278478,
"grad_norm": 0.7214533686637878,
"learning_rate": 7.858939801138061e-05,
"loss": 1.5181,
"step": 167
},
{
"epoch": 0.03347113612591523,
"grad_norm": 0.7866557836532593,
"learning_rate": 7.832031184624164e-05,
"loss": 1.693,
"step": 168
},
{
"epoch": 0.03367036907904567,
"grad_norm": 0.715961217880249,
"learning_rate": 7.80500125332005e-05,
"loss": 1.5668,
"step": 169
},
{
"epoch": 0.03386960203217612,
"grad_norm": 0.891643762588501,
"learning_rate": 7.777851165098012e-05,
"loss": 1.9778,
"step": 170
},
{
"epoch": 0.03406883498530657,
"grad_norm": 0.8939557075500488,
"learning_rate": 7.750582082977467e-05,
"loss": 1.8125,
"step": 171
},
{
"epoch": 0.034268067938437015,
"grad_norm": 0.7379950881004333,
"learning_rate": 7.723195175075136e-05,
"loss": 1.6326,
"step": 172
},
{
"epoch": 0.034467300891567465,
"grad_norm": 0.7286401987075806,
"learning_rate": 7.695691614555003e-05,
"loss": 1.7061,
"step": 173
},
{
"epoch": 0.034666533844697915,
"grad_norm": 0.7566738724708557,
"learning_rate": 7.668072579578058e-05,
"loss": 1.7648,
"step": 174
},
{
"epoch": 0.03486576679782836,
"grad_norm": 0.8034036755561829,
"learning_rate": 7.64033925325184e-05,
"loss": 1.7197,
"step": 175
},
{
"epoch": 0.03506499975095881,
"grad_norm": 0.8322769403457642,
"learning_rate": 7.612492823579745e-05,
"loss": 1.8101,
"step": 176
},
{
"epoch": 0.03526423270408926,
"grad_norm": 0.8380805850028992,
"learning_rate": 7.584534483410137e-05,
"loss": 1.7692,
"step": 177
},
{
"epoch": 0.0354634656572197,
"grad_norm": 0.8168779015541077,
"learning_rate": 7.55646543038526e-05,
"loss": 1.8212,
"step": 178
},
{
"epoch": 0.03566269861035015,
"grad_norm": 0.8079612851142883,
"learning_rate": 7.528286866889924e-05,
"loss": 1.6029,
"step": 179
},
{
"epoch": 0.0358619315634806,
"grad_norm": 0.8072683811187744,
"learning_rate": 7.500000000000001e-05,
"loss": 1.8446,
"step": 180
},
{
"epoch": 0.036061164516611045,
"grad_norm": 0.7589475512504578,
"learning_rate": 7.471606041430723e-05,
"loss": 1.6799,
"step": 181
},
{
"epoch": 0.036260397469741495,
"grad_norm": 0.8317643404006958,
"learning_rate": 7.443106207484776e-05,
"loss": 1.8887,
"step": 182
},
{
"epoch": 0.036459630422871946,
"grad_norm": 1.1349105834960938,
"learning_rate": 7.414501719000187e-05,
"loss": 1.7912,
"step": 183
},
{
"epoch": 0.03665886337600239,
"grad_norm": 0.7826024889945984,
"learning_rate": 7.385793801298042e-05,
"loss": 1.81,
"step": 184
},
{
"epoch": 0.03685809632913284,
"grad_norm": 0.904198944568634,
"learning_rate": 7.35698368412999e-05,
"loss": 1.6053,
"step": 185
},
{
"epoch": 0.03705732928226329,
"grad_norm": 0.8450482487678528,
"learning_rate": 7.328072601625557e-05,
"loss": 1.7809,
"step": 186
},
{
"epoch": 0.03725656223539373,
"grad_norm": 0.8804753422737122,
"learning_rate": 7.2990617922393e-05,
"loss": 1.9123,
"step": 187
},
{
"epoch": 0.03745579518852418,
"grad_norm": 0.9151681661605835,
"learning_rate": 7.269952498697734e-05,
"loss": 1.8679,
"step": 188
},
{
"epoch": 0.03765502814165463,
"grad_norm": 0.9498852491378784,
"learning_rate": 7.240745967946113e-05,
"loss": 1.9956,
"step": 189
},
{
"epoch": 0.037854261094785076,
"grad_norm": 1.0988324880599976,
"learning_rate": 7.211443451095007e-05,
"loss": 2.0273,
"step": 190
},
{
"epoch": 0.038053494047915526,
"grad_norm": 0.9793733358383179,
"learning_rate": 7.18204620336671e-05,
"loss": 2.0765,
"step": 191
},
{
"epoch": 0.038252727001045976,
"grad_norm": 0.9851742386817932,
"learning_rate": 7.152555484041476e-05,
"loss": 2.0685,
"step": 192
},
{
"epoch": 0.03845195995417642,
"grad_norm": 0.9779745936393738,
"learning_rate": 7.122972556403567e-05,
"loss": 2.0489,
"step": 193
},
{
"epoch": 0.03865119290730687,
"grad_norm": 0.9139167070388794,
"learning_rate": 7.09329868768714e-05,
"loss": 1.948,
"step": 194
},
{
"epoch": 0.03885042586043732,
"grad_norm": 0.9257181286811829,
"learning_rate": 7.063535149021973e-05,
"loss": 1.9178,
"step": 195
},
{
"epoch": 0.03904965881356776,
"grad_norm": 0.8992418050765991,
"learning_rate": 7.033683215379002e-05,
"loss": 1.9382,
"step": 196
},
{
"epoch": 0.03924889176669821,
"grad_norm": 1.0115379095077515,
"learning_rate": 7.003744165515705e-05,
"loss": 1.9663,
"step": 197
},
{
"epoch": 0.03944812471982866,
"grad_norm": 0.9632532596588135,
"learning_rate": 6.973719281921335e-05,
"loss": 2.0153,
"step": 198
},
{
"epoch": 0.039647357672959106,
"grad_norm": 0.9716604351997375,
"learning_rate": 6.943609850761979e-05,
"loss": 2.0656,
"step": 199
},
{
"epoch": 0.039846590626089556,
"grad_norm": 1.0795772075653076,
"learning_rate": 6.91341716182545e-05,
"loss": 1.8788,
"step": 200
},
{
"epoch": 0.039846590626089556,
"eval_loss": 1.7653837203979492,
"eval_runtime": 1307.447,
"eval_samples_per_second": 6.466,
"eval_steps_per_second": 1.617,
"step": 200
},
{
"epoch": 0.04004582357922,
"grad_norm": 0.7175781726837158,
"learning_rate": 6.883142508466054e-05,
"loss": 1.3337,
"step": 201
},
{
"epoch": 0.04024505653235045,
"grad_norm": 0.8620020747184753,
"learning_rate": 6.852787187549182e-05,
"loss": 1.3683,
"step": 202
},
{
"epoch": 0.0404442894854809,
"grad_norm": 0.8307313323020935,
"learning_rate": 6.82235249939575e-05,
"loss": 1.5546,
"step": 203
},
{
"epoch": 0.04064352243861134,
"grad_norm": 0.752736508846283,
"learning_rate": 6.7918397477265e-05,
"loss": 1.6792,
"step": 204
},
{
"epoch": 0.04084275539174179,
"grad_norm": 0.8471193909645081,
"learning_rate": 6.761250239606169e-05,
"loss": 1.6798,
"step": 205
},
{
"epoch": 0.04104198834487224,
"grad_norm": 0.7192975282669067,
"learning_rate": 6.730585285387465e-05,
"loss": 1.5224,
"step": 206
},
{
"epoch": 0.04124122129800269,
"grad_norm": 0.804473876953125,
"learning_rate": 6.699846198654971e-05,
"loss": 1.8085,
"step": 207
},
{
"epoch": 0.04144045425113314,
"grad_norm": 0.7053448557853699,
"learning_rate": 6.669034296168855e-05,
"loss": 1.5087,
"step": 208
},
{
"epoch": 0.04163968720426359,
"grad_norm": 0.6852158308029175,
"learning_rate": 6.638150897808468e-05,
"loss": 1.5744,
"step": 209
},
{
"epoch": 0.04183892015739403,
"grad_norm": 0.7240145802497864,
"learning_rate": 6.607197326515808e-05,
"loss": 1.4104,
"step": 210
},
{
"epoch": 0.04203815311052448,
"grad_norm": 0.8016362190246582,
"learning_rate": 6.57617490823885e-05,
"loss": 1.7332,
"step": 211
},
{
"epoch": 0.04223738606365493,
"grad_norm": 0.813021719455719,
"learning_rate": 6.545084971874738e-05,
"loss": 1.5941,
"step": 212
},
{
"epoch": 0.042436619016785374,
"grad_norm": 0.6962946653366089,
"learning_rate": 6.513928849212873e-05,
"loss": 1.5307,
"step": 213
},
{
"epoch": 0.042635851969915824,
"grad_norm": 0.7284409999847412,
"learning_rate": 6.482707874877854e-05,
"loss": 1.6151,
"step": 214
},
{
"epoch": 0.042835084923046274,
"grad_norm": 0.824651300907135,
"learning_rate": 6.451423386272312e-05,
"loss": 1.4702,
"step": 215
},
{
"epoch": 0.04303431787617672,
"grad_norm": 0.6916880011558533,
"learning_rate": 6.420076723519614e-05,
"loss": 1.6181,
"step": 216
},
{
"epoch": 0.04323355082930717,
"grad_norm": 0.6723554730415344,
"learning_rate": 6.388669229406462e-05,
"loss": 1.4223,
"step": 217
},
{
"epoch": 0.04343278378243762,
"grad_norm": 0.8126989603042603,
"learning_rate": 6.357202249325371e-05,
"loss": 1.457,
"step": 218
},
{
"epoch": 0.04363201673556806,
"grad_norm": 0.606365442276001,
"learning_rate": 6.32567713121704e-05,
"loss": 1.3669,
"step": 219
},
{
"epoch": 0.04383124968869851,
"grad_norm": 1.0885868072509766,
"learning_rate": 6.294095225512603e-05,
"loss": 1.6269,
"step": 220
},
{
"epoch": 0.04403048264182896,
"grad_norm": 0.6699976921081543,
"learning_rate": 6.26245788507579e-05,
"loss": 1.4144,
"step": 221
},
{
"epoch": 0.044229715594959404,
"grad_norm": 0.7255838513374329,
"learning_rate": 6.230766465144967e-05,
"loss": 1.528,
"step": 222
},
{
"epoch": 0.044428948548089854,
"grad_norm": 0.9336103200912476,
"learning_rate": 6.199022323275083e-05,
"loss": 1.4114,
"step": 223
},
{
"epoch": 0.044628181501220304,
"grad_norm": 1.1342967748641968,
"learning_rate": 6.167226819279528e-05,
"loss": 1.6462,
"step": 224
},
{
"epoch": 0.04482741445435075,
"grad_norm": 0.8370170593261719,
"learning_rate": 6.135381315171867e-05,
"loss": 1.6144,
"step": 225
},
{
"epoch": 0.0450266474074812,
"grad_norm": 0.8431183099746704,
"learning_rate": 6.103487175107507e-05,
"loss": 1.787,
"step": 226
},
{
"epoch": 0.04522588036061165,
"grad_norm": 0.776308536529541,
"learning_rate": 6.071545765325254e-05,
"loss": 1.4541,
"step": 227
},
{
"epoch": 0.04542511331374209,
"grad_norm": 0.6885831356048584,
"learning_rate": 6.0395584540887963e-05,
"loss": 1.2892,
"step": 228
},
{
"epoch": 0.04562434626687254,
"grad_norm": 0.848490834236145,
"learning_rate": 6.007526611628086e-05,
"loss": 1.7647,
"step": 229
},
{
"epoch": 0.04582357922000299,
"grad_norm": 0.8826428055763245,
"learning_rate": 5.9754516100806423e-05,
"loss": 1.9109,
"step": 230
},
{
"epoch": 0.046022812173133434,
"grad_norm": 0.8888834118843079,
"learning_rate": 5.9433348234327765e-05,
"loss": 1.7475,
"step": 231
},
{
"epoch": 0.046222045126263885,
"grad_norm": 1.0613147020339966,
"learning_rate": 5.911177627460739e-05,
"loss": 1.852,
"step": 232
},
{
"epoch": 0.046421278079394335,
"grad_norm": 0.91031813621521,
"learning_rate": 5.8789813996717736e-05,
"loss": 1.8365,
"step": 233
},
{
"epoch": 0.04662051103252478,
"grad_norm": 0.8690152764320374,
"learning_rate": 5.8467475192451226e-05,
"loss": 1.7708,
"step": 234
},
{
"epoch": 0.04681974398565523,
"grad_norm": 0.9083284139633179,
"learning_rate": 5.814477366972945e-05,
"loss": 1.8436,
"step": 235
},
{
"epoch": 0.04701897693878568,
"grad_norm": 1.0710521936416626,
"learning_rate": 5.782172325201155e-05,
"loss": 1.9518,
"step": 236
},
{
"epoch": 0.04721820989191612,
"grad_norm": 0.9421839118003845,
"learning_rate": 5.749833777770225e-05,
"loss": 1.9924,
"step": 237
},
{
"epoch": 0.04741744284504657,
"grad_norm": 0.9856808185577393,
"learning_rate": 5.717463109955896e-05,
"loss": 1.9579,
"step": 238
},
{
"epoch": 0.04761667579817702,
"grad_norm": 1.14179527759552,
"learning_rate": 5.685061708409841e-05,
"loss": 1.9985,
"step": 239
},
{
"epoch": 0.047815908751307465,
"grad_norm": 1.4646105766296387,
"learning_rate": 5.6526309611002594e-05,
"loss": 2.0769,
"step": 240
},
{
"epoch": 0.048015141704437915,
"grad_norm": 0.9936044216156006,
"learning_rate": 5.6201722572524275e-05,
"loss": 1.7261,
"step": 241
},
{
"epoch": 0.048214374657568365,
"grad_norm": 1.0193665027618408,
"learning_rate": 5.587686987289189e-05,
"loss": 2.02,
"step": 242
},
{
"epoch": 0.04841360761069881,
"grad_norm": 1.5562198162078857,
"learning_rate": 5.5551765427713884e-05,
"loss": 1.9928,
"step": 243
},
{
"epoch": 0.04861284056382926,
"grad_norm": 0.9775562882423401,
"learning_rate": 5.522642316338268e-05,
"loss": 2.0862,
"step": 244
},
{
"epoch": 0.0488120735169597,
"grad_norm": 1.0259995460510254,
"learning_rate": 5.490085701647805e-05,
"loss": 1.9838,
"step": 245
},
{
"epoch": 0.04901130647009015,
"grad_norm": 1.0052729845046997,
"learning_rate": 5.457508093317013e-05,
"loss": 2.0598,
"step": 246
},
{
"epoch": 0.0492105394232206,
"grad_norm": 0.9661989212036133,
"learning_rate": 5.4249108868622086e-05,
"loss": 2.0546,
"step": 247
},
{
"epoch": 0.049409772376351045,
"grad_norm": 1.0998971462249756,
"learning_rate": 5.392295478639225e-05,
"loss": 2.0453,
"step": 248
},
{
"epoch": 0.049609005329481495,
"grad_norm": 0.9969896078109741,
"learning_rate": 5.359663265783598e-05,
"loss": 2.0773,
"step": 249
},
{
"epoch": 0.049808238282611945,
"grad_norm": 1.1022703647613525,
"learning_rate": 5.327015646150716e-05,
"loss": 2.1264,
"step": 250
},
{
"epoch": 0.05000747123574239,
"grad_norm": 0.7417923808097839,
"learning_rate": 5.294354018255945e-05,
"loss": 1.2255,
"step": 251
},
{
"epoch": 0.05020670418887284,
"grad_norm": 0.8457772135734558,
"learning_rate": 5.26167978121472e-05,
"loss": 1.3468,
"step": 252
},
{
"epoch": 0.05040593714200329,
"grad_norm": 0.8699066042900085,
"learning_rate": 5.228994334682604e-05,
"loss": 1.683,
"step": 253
},
{
"epoch": 0.05060517009513373,
"grad_norm": 0.8360949158668518,
"learning_rate": 5.196299078795344e-05,
"loss": 1.667,
"step": 254
},
{
"epoch": 0.05080440304826418,
"grad_norm": 0.7682618498802185,
"learning_rate": 5.1635954141088813e-05,
"loss": 1.5661,
"step": 255
},
{
"epoch": 0.05100363600139463,
"grad_norm": 1.1287474632263184,
"learning_rate": 5.1308847415393666e-05,
"loss": 1.7158,
"step": 256
},
{
"epoch": 0.051202868954525076,
"grad_norm": 0.7990018725395203,
"learning_rate": 5.0981684623031415e-05,
"loss": 1.4798,
"step": 257
},
{
"epoch": 0.051402101907655526,
"grad_norm": 0.7507796883583069,
"learning_rate": 5.0654479778567223e-05,
"loss": 1.6002,
"step": 258
},
{
"epoch": 0.051601334860785976,
"grad_norm": 0.7758486866950989,
"learning_rate": 5.0327246898367597e-05,
"loss": 1.6382,
"step": 259
},
{
"epoch": 0.05180056781391642,
"grad_norm": 0.756610095500946,
"learning_rate": 5e-05,
"loss": 1.4587,
"step": 260
},
{
"epoch": 0.05199980076704687,
"grad_norm": 0.7428910732269287,
"learning_rate": 4.9672753101632415e-05,
"loss": 1.4796,
"step": 261
},
{
"epoch": 0.05219903372017732,
"grad_norm": 0.9282284379005432,
"learning_rate": 4.934552022143279e-05,
"loss": 1.7188,
"step": 262
},
{
"epoch": 0.05239826667330776,
"grad_norm": 0.8230169415473938,
"learning_rate": 4.901831537696859e-05,
"loss": 1.6737,
"step": 263
},
{
"epoch": 0.05259749962643821,
"grad_norm": 0.8880282640457153,
"learning_rate": 4.869115258460635e-05,
"loss": 1.6383,
"step": 264
},
{
"epoch": 0.05279673257956866,
"grad_norm": 0.6525868773460388,
"learning_rate": 4.83640458589112e-05,
"loss": 1.2746,
"step": 265
},
{
"epoch": 0.052995965532699106,
"grad_norm": 0.8035927414894104,
"learning_rate": 4.8037009212046586e-05,
"loss": 1.5259,
"step": 266
},
{
"epoch": 0.053195198485829556,
"grad_norm": 0.7835052013397217,
"learning_rate": 4.7710056653173976e-05,
"loss": 1.631,
"step": 267
},
{
"epoch": 0.053394431438960006,
"grad_norm": 0.7649900317192078,
"learning_rate": 4.738320218785281e-05,
"loss": 1.6074,
"step": 268
},
{
"epoch": 0.05359366439209045,
"grad_norm": 0.8601043820381165,
"learning_rate": 4.7056459817440544e-05,
"loss": 1.6147,
"step": 269
},
{
"epoch": 0.0537928973452209,
"grad_norm": 0.775799572467804,
"learning_rate": 4.6729843538492847e-05,
"loss": 1.5416,
"step": 270
},
{
"epoch": 0.05399213029835135,
"grad_norm": 0.8741908073425293,
"learning_rate": 4.640336734216403e-05,
"loss": 1.7454,
"step": 271
},
{
"epoch": 0.05419136325148179,
"grad_norm": 0.8385461568832397,
"learning_rate": 4.607704521360776e-05,
"loss": 1.6794,
"step": 272
},
{
"epoch": 0.05439059620461224,
"grad_norm": 0.7103844285011292,
"learning_rate": 4.575089113137792e-05,
"loss": 1.5788,
"step": 273
},
{
"epoch": 0.05458982915774269,
"grad_norm": 0.7964368462562561,
"learning_rate": 4.542491906682989e-05,
"loss": 1.5432,
"step": 274
},
{
"epoch": 0.05478906211087314,
"grad_norm": 0.7998329997062683,
"learning_rate": 4.509914298352197e-05,
"loss": 1.8727,
"step": 275
},
{
"epoch": 0.05498829506400359,
"grad_norm": 0.7981895208358765,
"learning_rate": 4.477357683661734e-05,
"loss": 1.7219,
"step": 276
},
{
"epoch": 0.05518752801713404,
"grad_norm": 0.677744448184967,
"learning_rate": 4.444823457228612e-05,
"loss": 1.5126,
"step": 277
},
{
"epoch": 0.05538676097026448,
"grad_norm": 0.8186181783676147,
"learning_rate": 4.412313012710813e-05,
"loss": 1.7727,
"step": 278
},
{
"epoch": 0.05558599392339493,
"grad_norm": 0.8363473415374756,
"learning_rate": 4.379827742747575e-05,
"loss": 1.4963,
"step": 279
},
{
"epoch": 0.05578522687652538,
"grad_norm": 1.204262614250183,
"learning_rate": 4.347369038899744e-05,
"loss": 1.6811,
"step": 280
},
{
"epoch": 0.055984459829655824,
"grad_norm": 0.9086161851882935,
"learning_rate": 4.3149382915901606e-05,
"loss": 1.8958,
"step": 281
},
{
"epoch": 0.056183692782786274,
"grad_norm": 0.8663166165351868,
"learning_rate": 4.282536890044104e-05,
"loss": 1.8902,
"step": 282
},
{
"epoch": 0.056382925735916724,
"grad_norm": 0.9640662670135498,
"learning_rate": 4.250166222229774e-05,
"loss": 2.0409,
"step": 283
},
{
"epoch": 0.05658215868904717,
"grad_norm": 0.9557836651802063,
"learning_rate": 4.2178276747988446e-05,
"loss": 1.9425,
"step": 284
},
{
"epoch": 0.05678139164217762,
"grad_norm": 1.0826152563095093,
"learning_rate": 4.185522633027057e-05,
"loss": 1.6738,
"step": 285
},
{
"epoch": 0.05698062459530807,
"grad_norm": 0.7835938334465027,
"learning_rate": 4.153252480754877e-05,
"loss": 1.6434,
"step": 286
},
{
"epoch": 0.05717985754843851,
"grad_norm": 0.9007090330123901,
"learning_rate": 4.1210186003282275e-05,
"loss": 1.7945,
"step": 287
},
{
"epoch": 0.05737909050156896,
"grad_norm": 0.9333200454711914,
"learning_rate": 4.088822372539263e-05,
"loss": 2.1531,
"step": 288
},
{
"epoch": 0.057578323454699404,
"grad_norm": 0.9241042137145996,
"learning_rate": 4.0566651765672246e-05,
"loss": 2.0779,
"step": 289
},
{
"epoch": 0.057777556407829854,
"grad_norm": 0.9367052912712097,
"learning_rate": 4.0245483899193595e-05,
"loss": 1.947,
"step": 290
},
{
"epoch": 0.057976789360960304,
"grad_norm": 0.8407790660858154,
"learning_rate": 3.992473388371915e-05,
"loss": 1.6544,
"step": 291
},
{
"epoch": 0.05817602231409075,
"grad_norm": 1.0361599922180176,
"learning_rate": 3.960441545911204e-05,
"loss": 2.0688,
"step": 292
},
{
"epoch": 0.0583752552672212,
"grad_norm": 1.0046849250793457,
"learning_rate": 3.928454234674747e-05,
"loss": 2.1278,
"step": 293
},
{
"epoch": 0.05857448822035165,
"grad_norm": 0.9455753564834595,
"learning_rate": 3.896512824892495e-05,
"loss": 1.8499,
"step": 294
},
{
"epoch": 0.05877372117348209,
"grad_norm": 0.9063624739646912,
"learning_rate": 3.864618684828134e-05,
"loss": 1.9574,
"step": 295
},
{
"epoch": 0.05897295412661254,
"grad_norm": 0.9712712168693542,
"learning_rate": 3.832773180720475e-05,
"loss": 1.9854,
"step": 296
},
{
"epoch": 0.05917218707974299,
"grad_norm": 0.953110933303833,
"learning_rate": 3.800977676724919e-05,
"loss": 1.8418,
"step": 297
},
{
"epoch": 0.059371420032873434,
"grad_norm": 0.8904990553855896,
"learning_rate": 3.769233534855035e-05,
"loss": 1.9232,
"step": 298
},
{
"epoch": 0.059570652986003884,
"grad_norm": 0.9431455135345459,
"learning_rate": 3.73754211492421e-05,
"loss": 1.7605,
"step": 299
},
{
"epoch": 0.059769885939134335,
"grad_norm": 1.0603832006454468,
"learning_rate": 3.705904774487396e-05,
"loss": 2.0183,
"step": 300
},
{
"epoch": 0.059769885939134335,
"eval_loss": 1.7440650463104248,
"eval_runtime": 1312.4715,
"eval_samples_per_second": 6.441,
"eval_steps_per_second": 1.611,
"step": 300
},
{
"epoch": 0.05996911889226478,
"grad_norm": 0.9234136343002319,
"learning_rate": 3.6743228687829595e-05,
"loss": 1.4519,
"step": 301
},
{
"epoch": 0.06016835184539523,
"grad_norm": 0.7455631494522095,
"learning_rate": 3.642797750674629e-05,
"loss": 1.4676,
"step": 302
},
{
"epoch": 0.06036758479852568,
"grad_norm": 0.8243167400360107,
"learning_rate": 3.6113307705935396e-05,
"loss": 1.4413,
"step": 303
},
{
"epoch": 0.06056681775165612,
"grad_norm": 0.7181432843208313,
"learning_rate": 3.579923276480387e-05,
"loss": 1.719,
"step": 304
},
{
"epoch": 0.06076605070478657,
"grad_norm": 0.7262921333312988,
"learning_rate": 3.5485766137276894e-05,
"loss": 1.3258,
"step": 305
},
{
"epoch": 0.06096528365791702,
"grad_norm": 0.6151476502418518,
"learning_rate": 3.5172921251221455e-05,
"loss": 1.2912,
"step": 306
},
{
"epoch": 0.061164516611047465,
"grad_norm": 0.7192188501358032,
"learning_rate": 3.486071150787128e-05,
"loss": 1.5151,
"step": 307
},
{
"epoch": 0.061363749564177915,
"grad_norm": 0.7189763784408569,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.4975,
"step": 308
},
{
"epoch": 0.061562982517308365,
"grad_norm": 0.9334692358970642,
"learning_rate": 3.423825091761153e-05,
"loss": 1.8043,
"step": 309
},
{
"epoch": 0.06176221547043881,
"grad_norm": 0.7157220244407654,
"learning_rate": 3.392802673484193e-05,
"loss": 1.5496,
"step": 310
},
{
"epoch": 0.06196144842356926,
"grad_norm": 0.7380807399749756,
"learning_rate": 3.361849102191533e-05,
"loss": 1.4888,
"step": 311
},
{
"epoch": 0.06216068137669971,
"grad_norm": 0.7653237581253052,
"learning_rate": 3.330965703831146e-05,
"loss": 1.5173,
"step": 312
},
{
"epoch": 0.06235991432983015,
"grad_norm": 0.6636477112770081,
"learning_rate": 3.300153801345028e-05,
"loss": 1.3109,
"step": 313
},
{
"epoch": 0.0625591472829606,
"grad_norm": 0.6692584156990051,
"learning_rate": 3.2694147146125345e-05,
"loss": 1.5803,
"step": 314
},
{
"epoch": 0.06275838023609105,
"grad_norm": 0.7734482884407043,
"learning_rate": 3.2387497603938326e-05,
"loss": 1.5582,
"step": 315
},
{
"epoch": 0.0629576131892215,
"grad_norm": 0.7633421421051025,
"learning_rate": 3.2081602522734986e-05,
"loss": 1.474,
"step": 316
},
{
"epoch": 0.06315684614235194,
"grad_norm": 0.8933658003807068,
"learning_rate": 3.177647500604252e-05,
"loss": 1.4464,
"step": 317
},
{
"epoch": 0.0633560790954824,
"grad_norm": 0.7234926223754883,
"learning_rate": 3.147212812450819e-05,
"loss": 1.5108,
"step": 318
},
{
"epoch": 0.06355531204861284,
"grad_norm": 0.8385280966758728,
"learning_rate": 3.116857491533947e-05,
"loss": 1.6382,
"step": 319
},
{
"epoch": 0.06375454500174328,
"grad_norm": 0.7827748656272888,
"learning_rate": 3.086582838174551e-05,
"loss": 1.7277,
"step": 320
},
{
"epoch": 0.06395377795487374,
"grad_norm": 0.9369984865188599,
"learning_rate": 3.056390149238022e-05,
"loss": 1.7291,
"step": 321
},
{
"epoch": 0.06415301090800418,
"grad_norm": 0.8043080568313599,
"learning_rate": 3.0262807180786647e-05,
"loss": 1.7082,
"step": 322
},
{
"epoch": 0.06435224386113463,
"grad_norm": 0.8048945665359497,
"learning_rate": 2.996255834484296e-05,
"loss": 1.7557,
"step": 323
},
{
"epoch": 0.06455147681426508,
"grad_norm": 0.7266145348548889,
"learning_rate": 2.9663167846209998e-05,
"loss": 1.5275,
"step": 324
},
{
"epoch": 0.06475070976739553,
"grad_norm": 0.9198572039604187,
"learning_rate": 2.936464850978027e-05,
"loss": 1.7147,
"step": 325
},
{
"epoch": 0.06494994272052597,
"grad_norm": 0.8027574419975281,
"learning_rate": 2.9067013123128613e-05,
"loss": 1.7335,
"step": 326
},
{
"epoch": 0.06514917567365643,
"grad_norm": 0.7442224025726318,
"learning_rate": 2.8770274435964355e-05,
"loss": 1.572,
"step": 327
},
{
"epoch": 0.06534840862678687,
"grad_norm": 0.7306829690933228,
"learning_rate": 2.8474445159585235e-05,
"loss": 1.6521,
"step": 328
},
{
"epoch": 0.06554764157991731,
"grad_norm": 0.7424048185348511,
"learning_rate": 2.8179537966332887e-05,
"loss": 1.6062,
"step": 329
},
{
"epoch": 0.06574687453304777,
"grad_norm": 0.7699829936027527,
"learning_rate": 2.7885565489049946e-05,
"loss": 1.5977,
"step": 330
},
{
"epoch": 0.06594610748617821,
"grad_norm": 0.8047099709510803,
"learning_rate": 2.759254032053888e-05,
"loss": 1.655,
"step": 331
},
{
"epoch": 0.06614534043930866,
"grad_norm": 0.8257668018341064,
"learning_rate": 2.7300475013022663e-05,
"loss": 1.5537,
"step": 332
},
{
"epoch": 0.06634457339243911,
"grad_norm": 0.7759456634521484,
"learning_rate": 2.700938207760701e-05,
"loss": 1.7591,
"step": 333
},
{
"epoch": 0.06654380634556956,
"grad_norm": 0.893644392490387,
"learning_rate": 2.671927398374443e-05,
"loss": 1.9046,
"step": 334
},
{
"epoch": 0.0667430392987,
"grad_norm": 0.8226551413536072,
"learning_rate": 2.6430163158700115e-05,
"loss": 1.6834,
"step": 335
},
{
"epoch": 0.06694227225183046,
"grad_norm": 0.8337216973304749,
"learning_rate": 2.6142061987019577e-05,
"loss": 1.6095,
"step": 336
},
{
"epoch": 0.0671415052049609,
"grad_norm": 0.981137752532959,
"learning_rate": 2.5854982809998153e-05,
"loss": 1.9304,
"step": 337
},
{
"epoch": 0.06734073815809134,
"grad_norm": 1.0048673152923584,
"learning_rate": 2.556893792515227e-05,
"loss": 1.9332,
"step": 338
},
{
"epoch": 0.0675399711112218,
"grad_norm": 0.9600777626037598,
"learning_rate": 2.5283939585692783e-05,
"loss": 2.1824,
"step": 339
},
{
"epoch": 0.06773920406435224,
"grad_norm": 0.9388259649276733,
"learning_rate": 2.500000000000001e-05,
"loss": 1.9205,
"step": 340
},
{
"epoch": 0.06793843701748269,
"grad_norm": 1.0500521659851074,
"learning_rate": 2.471713133110078e-05,
"loss": 1.9183,
"step": 341
},
{
"epoch": 0.06813766997061314,
"grad_norm": 1.1563540697097778,
"learning_rate": 2.4435345696147403e-05,
"loss": 2.0606,
"step": 342
},
{
"epoch": 0.06833690292374359,
"grad_norm": 0.9865449070930481,
"learning_rate": 2.4154655165898627e-05,
"loss": 2.1936,
"step": 343
},
{
"epoch": 0.06853613587687403,
"grad_norm": 1.3468120098114014,
"learning_rate": 2.3875071764202563e-05,
"loss": 1.9796,
"step": 344
},
{
"epoch": 0.06873536883000449,
"grad_norm": 0.9959917664527893,
"learning_rate": 2.3596607467481603e-05,
"loss": 2.0457,
"step": 345
},
{
"epoch": 0.06893460178313493,
"grad_norm": 0.9387736320495605,
"learning_rate": 2.3319274204219428e-05,
"loss": 1.8885,
"step": 346
},
{
"epoch": 0.06913383473626537,
"grad_norm": 1.1370142698287964,
"learning_rate": 2.3043083854449988e-05,
"loss": 2.1721,
"step": 347
},
{
"epoch": 0.06933306768939583,
"grad_norm": 0.9565383791923523,
"learning_rate": 2.2768048249248648e-05,
"loss": 2.0364,
"step": 348
},
{
"epoch": 0.06953230064252627,
"grad_norm": 0.9984736442565918,
"learning_rate": 2.2494179170225333e-05,
"loss": 1.9395,
"step": 349
},
{
"epoch": 0.06973153359565672,
"grad_norm": 0.9446450471878052,
"learning_rate": 2.2221488349019903e-05,
"loss": 1.8676,
"step": 350
},
{
"epoch": 0.06993076654878717,
"grad_norm": 0.6334500312805176,
"learning_rate": 2.194998746679952e-05,
"loss": 1.3377,
"step": 351
},
{
"epoch": 0.07012999950191762,
"grad_norm": 0.8363780975341797,
"learning_rate": 2.167968815375837e-05,
"loss": 1.5063,
"step": 352
},
{
"epoch": 0.07032923245504806,
"grad_norm": 0.7027566432952881,
"learning_rate": 2.1410601988619394e-05,
"loss": 1.5537,
"step": 353
},
{
"epoch": 0.07052846540817852,
"grad_norm": 0.726865291595459,
"learning_rate": 2.1142740498138324e-05,
"loss": 1.3377,
"step": 354
},
{
"epoch": 0.07072769836130896,
"grad_norm": 0.7450829148292542,
"learning_rate": 2.08761151566099e-05,
"loss": 1.4613,
"step": 355
},
{
"epoch": 0.0709269313144394,
"grad_norm": 0.7997395992279053,
"learning_rate": 2.061073738537635e-05,
"loss": 1.5064,
"step": 356
},
{
"epoch": 0.07112616426756986,
"grad_norm": 0.6779711842536926,
"learning_rate": 2.034661855233815e-05,
"loss": 1.5039,
"step": 357
},
{
"epoch": 0.0713253972207003,
"grad_norm": 0.6842930912971497,
"learning_rate": 2.008376997146705e-05,
"loss": 1.4626,
"step": 358
},
{
"epoch": 0.07152463017383075,
"grad_norm": 0.7847104072570801,
"learning_rate": 1.982220290232143e-05,
"loss": 1.6606,
"step": 359
},
{
"epoch": 0.0717238631269612,
"grad_norm": 0.6870419383049011,
"learning_rate": 1.9561928549563968e-05,
"loss": 1.4029,
"step": 360
},
{
"epoch": 0.07192309608009165,
"grad_norm": 0.7871290445327759,
"learning_rate": 1.9302958062481673e-05,
"loss": 1.6475,
"step": 361
},
{
"epoch": 0.07212232903322209,
"grad_norm": 0.7864654064178467,
"learning_rate": 1.9045302534508297e-05,
"loss": 1.7248,
"step": 362
},
{
"epoch": 0.07232156198635255,
"grad_norm": 0.7532442808151245,
"learning_rate": 1.8788973002749112e-05,
"loss": 1.5125,
"step": 363
},
{
"epoch": 0.07252079493948299,
"grad_norm": 0.5970544815063477,
"learning_rate": 1.8533980447508137e-05,
"loss": 1.4215,
"step": 364
},
{
"epoch": 0.07272002789261343,
"grad_norm": 0.5979761481285095,
"learning_rate": 1.8280335791817733e-05,
"loss": 1.2157,
"step": 365
},
{
"epoch": 0.07291926084574389,
"grad_norm": 0.7799795269966125,
"learning_rate": 1.8028049900970767e-05,
"loss": 1.5365,
"step": 366
},
{
"epoch": 0.07311849379887433,
"grad_norm": 0.8635774254798889,
"learning_rate": 1.777713358205514e-05,
"loss": 1.535,
"step": 367
},
{
"epoch": 0.07331772675200478,
"grad_norm": 0.7627410292625427,
"learning_rate": 1.7527597583490822e-05,
"loss": 1.5883,
"step": 368
},
{
"epoch": 0.07351695970513523,
"grad_norm": 0.8532704710960388,
"learning_rate": 1.7279452594569483e-05,
"loss": 1.8226,
"step": 369
},
{
"epoch": 0.07371619265826568,
"grad_norm": 0.7512400150299072,
"learning_rate": 1.703270924499656e-05,
"loss": 1.673,
"step": 370
},
{
"epoch": 0.07391542561139612,
"grad_norm": 0.8353219032287598,
"learning_rate": 1.678737810443593e-05,
"loss": 1.6371,
"step": 371
},
{
"epoch": 0.07411465856452658,
"grad_norm": 0.7360902428627014,
"learning_rate": 1.6543469682057106e-05,
"loss": 1.4375,
"step": 372
},
{
"epoch": 0.07431389151765702,
"grad_norm": 0.8248170018196106,
"learning_rate": 1.6300994426085103e-05,
"loss": 1.6235,
"step": 373
},
{
"epoch": 0.07451312447078746,
"grad_norm": 0.7493998408317566,
"learning_rate": 1.605996272335291e-05,
"loss": 1.5191,
"step": 374
},
{
"epoch": 0.07471235742391792,
"grad_norm": 0.760131299495697,
"learning_rate": 1.5820384898856434e-05,
"loss": 1.5488,
"step": 375
},
{
"epoch": 0.07491159037704836,
"grad_norm": 0.8441518545150757,
"learning_rate": 1.5582271215312294e-05,
"loss": 1.6011,
"step": 376
},
{
"epoch": 0.07511082333017881,
"grad_norm": 0.8258650898933411,
"learning_rate": 1.5345631872718214e-05,
"loss": 1.6453,
"step": 377
},
{
"epoch": 0.07531005628330927,
"grad_norm": 0.7505908012390137,
"learning_rate": 1.5110477007916001e-05,
"loss": 1.4798,
"step": 378
},
{
"epoch": 0.07550928923643971,
"grad_norm": 0.7128810286521912,
"learning_rate": 1.4876816694157419e-05,
"loss": 1.5665,
"step": 379
},
{
"epoch": 0.07570852218957015,
"grad_norm": 0.8183525800704956,
"learning_rate": 1.4644660940672627e-05,
"loss": 1.6711,
"step": 380
},
{
"epoch": 0.07590775514270061,
"grad_norm": 0.8301215767860413,
"learning_rate": 1.4414019692241437e-05,
"loss": 1.9458,
"step": 381
},
{
"epoch": 0.07610698809583105,
"grad_norm": 0.8092257380485535,
"learning_rate": 1.4184902828767287e-05,
"loss": 1.7644,
"step": 382
},
{
"epoch": 0.0763062210489615,
"grad_norm": 0.8209111094474792,
"learning_rate": 1.3957320164854059e-05,
"loss": 1.7298,
"step": 383
},
{
"epoch": 0.07650545400209195,
"grad_norm": 0.8352941274642944,
"learning_rate": 1.373128144938563e-05,
"loss": 1.7143,
"step": 384
},
{
"epoch": 0.0767046869552224,
"grad_norm": 0.8569772243499756,
"learning_rate": 1.3506796365108232e-05,
"loss": 1.8154,
"step": 385
},
{
"epoch": 0.07690391990835284,
"grad_norm": 0.8412910103797913,
"learning_rate": 1.3283874528215733e-05,
"loss": 1.8742,
"step": 386
},
{
"epoch": 0.0771031528614833,
"grad_norm": 0.907353937625885,
"learning_rate": 1.3062525487937699e-05,
"loss": 2.0139,
"step": 387
},
{
"epoch": 0.07730238581461374,
"grad_norm": 0.8931063413619995,
"learning_rate": 1.2842758726130283e-05,
"loss": 1.9315,
"step": 388
},
{
"epoch": 0.07750161876774418,
"grad_norm": 1.0445189476013184,
"learning_rate": 1.2624583656870154e-05,
"loss": 2.0511,
"step": 389
},
{
"epoch": 0.07770085172087464,
"grad_norm": 0.9549689888954163,
"learning_rate": 1.2408009626051137e-05,
"loss": 2.019,
"step": 390
},
{
"epoch": 0.07790008467400508,
"grad_norm": 0.8554056286811829,
"learning_rate": 1.2193045910983863e-05,
"loss": 1.7846,
"step": 391
},
{
"epoch": 0.07809931762713553,
"grad_norm": 1.0174126625061035,
"learning_rate": 1.1979701719998453e-05,
"loss": 2.2389,
"step": 392
},
{
"epoch": 0.07829855058026598,
"grad_norm": 1.0677056312561035,
"learning_rate": 1.1767986192049984e-05,
"loss": 2.2263,
"step": 393
},
{
"epoch": 0.07849778353339643,
"grad_norm": 0.9621496200561523,
"learning_rate": 1.1557908396327028e-05,
"loss": 1.9629,
"step": 394
},
{
"epoch": 0.07869701648652687,
"grad_norm": 0.9656440019607544,
"learning_rate": 1.134947733186315e-05,
"loss": 2.1125,
"step": 395
},
{
"epoch": 0.07889624943965733,
"grad_norm": 0.8916715979576111,
"learning_rate": 1.1142701927151456e-05,
"loss": 1.9651,
"step": 396
},
{
"epoch": 0.07909548239278777,
"grad_norm": 1.1278259754180908,
"learning_rate": 1.0937591039762085e-05,
"loss": 2.1121,
"step": 397
},
{
"epoch": 0.07929471534591821,
"grad_norm": 1.0817816257476807,
"learning_rate": 1.0734153455962765e-05,
"loss": 1.9473,
"step": 398
},
{
"epoch": 0.07949394829904867,
"grad_norm": 0.9848653674125671,
"learning_rate": 1.0532397890342505e-05,
"loss": 2.0839,
"step": 399
},
{
"epoch": 0.07969318125217911,
"grad_norm": 1.0025157928466797,
"learning_rate": 1.0332332985438248e-05,
"loss": 1.9569,
"step": 400
},
{
"epoch": 0.07969318125217911,
"eval_loss": 1.725017786026001,
"eval_runtime": 1311.2021,
"eval_samples_per_second": 6.448,
"eval_steps_per_second": 1.612,
"step": 400
},
{
"epoch": 0.07989241420530956,
"grad_norm": 0.6342359185218811,
"learning_rate": 1.013396731136465e-05,
"loss": 1.1818,
"step": 401
},
{
"epoch": 0.08009164715844,
"grad_norm": 0.6265669465065002,
"learning_rate": 9.937309365446973e-06,
"loss": 1.3554,
"step": 402
},
{
"epoch": 0.08029088011157046,
"grad_norm": 0.639958381652832,
"learning_rate": 9.742367571857091e-06,
"loss": 1.4432,
"step": 403
},
{
"epoch": 0.0804901130647009,
"grad_norm": 0.6715696454048157,
"learning_rate": 9.549150281252633e-06,
"loss": 1.5088,
"step": 404
},
{
"epoch": 0.08068934601783134,
"grad_norm": 0.7032715082168579,
"learning_rate": 9.357665770419244e-06,
"loss": 1.4106,
"step": 405
},
{
"epoch": 0.0808885789709618,
"grad_norm": 0.6766837239265442,
"learning_rate": 9.167922241916055e-06,
"loss": 1.4883,
"step": 406
},
{
"epoch": 0.08108781192409224,
"grad_norm": 0.7200549244880676,
"learning_rate": 8.97992782372432e-06,
"loss": 1.5611,
"step": 407
},
{
"epoch": 0.08128704487722269,
"grad_norm": 0.7490854859352112,
"learning_rate": 8.793690568899216e-06,
"loss": 1.4866,
"step": 408
},
{
"epoch": 0.08148627783035314,
"grad_norm": 0.685373067855835,
"learning_rate": 8.609218455224893e-06,
"loss": 1.493,
"step": 409
},
{
"epoch": 0.08168551078348359,
"grad_norm": 0.7181761264801025,
"learning_rate": 8.426519384872733e-06,
"loss": 1.4245,
"step": 410
},
{
"epoch": 0.08188474373661403,
"grad_norm": 0.7508552074432373,
"learning_rate": 8.245601184062852e-06,
"loss": 1.4166,
"step": 411
},
{
"epoch": 0.08208397668974449,
"grad_norm": 0.7618119120597839,
"learning_rate": 8.066471602728803e-06,
"loss": 1.6266,
"step": 412
},
{
"epoch": 0.08228320964287493,
"grad_norm": 0.8253456354141235,
"learning_rate": 7.889138314185678e-06,
"loss": 1.5646,
"step": 413
},
{
"epoch": 0.08248244259600537,
"grad_norm": 0.7223126292228699,
"learning_rate": 7.71360891480134e-06,
"loss": 1.579,
"step": 414
},
{
"epoch": 0.08268167554913583,
"grad_norm": 0.8785665035247803,
"learning_rate": 7.539890923671062e-06,
"loss": 1.8207,
"step": 415
},
{
"epoch": 0.08288090850226627,
"grad_norm": 0.7915347218513489,
"learning_rate": 7.367991782295391e-06,
"loss": 1.5869,
"step": 416
},
{
"epoch": 0.08308014145539672,
"grad_norm": 0.6824432611465454,
"learning_rate": 7.197918854261432e-06,
"loss": 1.3985,
"step": 417
},
{
"epoch": 0.08327937440852717,
"grad_norm": 0.7498596906661987,
"learning_rate": 7.029679424927365e-06,
"loss": 1.4986,
"step": 418
},
{
"epoch": 0.08347860736165762,
"grad_norm": 0.7597733736038208,
"learning_rate": 6.863280701110408e-06,
"loss": 1.7079,
"step": 419
},
{
"epoch": 0.08367784031478806,
"grad_norm": 0.7908221483230591,
"learning_rate": 6.698729810778065e-06,
"loss": 1.5831,
"step": 420
},
{
"epoch": 0.08387707326791852,
"grad_norm": 0.7998091578483582,
"learning_rate": 6.536033802742813e-06,
"loss": 1.7472,
"step": 421
},
{
"epoch": 0.08407630622104896,
"grad_norm": 0.7912585735321045,
"learning_rate": 6.375199646360142e-06,
"loss": 1.6609,
"step": 422
},
{
"epoch": 0.0842755391741794,
"grad_norm": 0.8024159073829651,
"learning_rate": 6.216234231230012e-06,
"loss": 1.8421,
"step": 423
},
{
"epoch": 0.08447477212730986,
"grad_norm": 0.7846222519874573,
"learning_rate": 6.059144366901736e-06,
"loss": 1.5901,
"step": 424
},
{
"epoch": 0.0846740050804403,
"grad_norm": 0.8087249994277954,
"learning_rate": 5.903936782582253e-06,
"loss": 1.6698,
"step": 425
},
{
"epoch": 0.08487323803357075,
"grad_norm": 0.8053250908851624,
"learning_rate": 5.750618126847912e-06,
"loss": 1.6399,
"step": 426
},
{
"epoch": 0.0850724709867012,
"grad_norm": 0.678579568862915,
"learning_rate": 5.599194967359639e-06,
"loss": 1.5368,
"step": 427
},
{
"epoch": 0.08527170393983165,
"grad_norm": 0.7634561657905579,
"learning_rate": 5.449673790581611e-06,
"loss": 1.6473,
"step": 428
},
{
"epoch": 0.08547093689296209,
"grad_norm": 0.8198792338371277,
"learning_rate": 5.302061001503394e-06,
"loss": 1.5986,
"step": 429
},
{
"epoch": 0.08567016984609255,
"grad_norm": 0.9108977913856506,
"learning_rate": 5.156362923365588e-06,
"loss": 1.8712,
"step": 430
},
{
"epoch": 0.08586940279922299,
"grad_norm": 0.7726624011993408,
"learning_rate": 5.012585797388936e-06,
"loss": 1.5413,
"step": 431
},
{
"epoch": 0.08606863575235343,
"grad_norm": 0.8134598135948181,
"learning_rate": 4.87073578250698e-06,
"loss": 1.5729,
"step": 432
},
{
"epoch": 0.08626786870548389,
"grad_norm": 0.8380071520805359,
"learning_rate": 4.730818955102234e-06,
"loss": 1.683,
"step": 433
},
{
"epoch": 0.08646710165861433,
"grad_norm": 0.9267144799232483,
"learning_rate": 4.592841308745932e-06,
"loss": 1.8211,
"step": 434
},
{
"epoch": 0.08666633461174478,
"grad_norm": 0.8285892605781555,
"learning_rate": 4.456808753941205e-06,
"loss": 1.6478,
"step": 435
},
{
"epoch": 0.08686556756487523,
"grad_norm": 0.8663996458053589,
"learning_rate": 4.322727117869951e-06,
"loss": 2.0562,
"step": 436
},
{
"epoch": 0.08706480051800568,
"grad_norm": 0.9194437265396118,
"learning_rate": 4.190602144143207e-06,
"loss": 1.7663,
"step": 437
},
{
"epoch": 0.08726403347113612,
"grad_norm": 0.8766307234764099,
"learning_rate": 4.06043949255509e-06,
"loss": 1.8105,
"step": 438
},
{
"epoch": 0.08746326642426658,
"grad_norm": 1.0402116775512695,
"learning_rate": 3.932244738840379e-06,
"loss": 1.9364,
"step": 439
},
{
"epoch": 0.08766249937739702,
"grad_norm": 0.9294980764389038,
"learning_rate": 3.8060233744356633e-06,
"loss": 1.9542,
"step": 440
},
{
"epoch": 0.08786173233052746,
"grad_norm": 0.8864694237709045,
"learning_rate": 3.681780806244095e-06,
"loss": 1.8497,
"step": 441
},
{
"epoch": 0.08806096528365792,
"grad_norm": 0.9366459250450134,
"learning_rate": 3.5595223564037884e-06,
"loss": 2.0825,
"step": 442
},
{
"epoch": 0.08826019823678836,
"grad_norm": 0.9816076159477234,
"learning_rate": 3.4392532620598216e-06,
"loss": 1.9228,
"step": 443
},
{
"epoch": 0.08845943118991881,
"grad_norm": 0.9872433543205261,
"learning_rate": 3.3209786751399187e-06,
"loss": 2.0644,
"step": 444
},
{
"epoch": 0.08865866414304927,
"grad_norm": 0.8447529673576355,
"learning_rate": 3.2047036621337236e-06,
"loss": 1.8916,
"step": 445
},
{
"epoch": 0.08885789709617971,
"grad_norm": 0.987470269203186,
"learning_rate": 3.0904332038757977e-06,
"loss": 2.1664,
"step": 446
},
{
"epoch": 0.08905713004931015,
"grad_norm": 0.905831515789032,
"learning_rate": 2.978172195332263e-06,
"loss": 1.7784,
"step": 447
},
{
"epoch": 0.08925636300244061,
"grad_norm": 1.0130242109298706,
"learning_rate": 2.8679254453910785e-06,
"loss": 1.9239,
"step": 448
},
{
"epoch": 0.08945559595557105,
"grad_norm": 1.1352851390838623,
"learning_rate": 2.759697676656098e-06,
"loss": 2.1013,
"step": 449
},
{
"epoch": 0.0896548289087015,
"grad_norm": 1.1989761590957642,
"learning_rate": 2.653493525244721e-06,
"loss": 2.1471,
"step": 450
},
{
"epoch": 0.08985406186183195,
"grad_norm": 0.6423122882843018,
"learning_rate": 2.549317540589308e-06,
"loss": 1.3106,
"step": 451
},
{
"epoch": 0.0900532948149624,
"grad_norm": 0.6389409899711609,
"learning_rate": 2.4471741852423237e-06,
"loss": 1.4082,
"step": 452
},
{
"epoch": 0.09025252776809284,
"grad_norm": 0.6329924464225769,
"learning_rate": 2.3470678346851518e-06,
"loss": 1.4597,
"step": 453
},
{
"epoch": 0.0904517607212233,
"grad_norm": 0.6127148866653442,
"learning_rate": 2.2490027771406687e-06,
"loss": 1.176,
"step": 454
},
{
"epoch": 0.09065099367435374,
"grad_norm": 0.6773777604103088,
"learning_rate": 2.152983213389559e-06,
"loss": 1.2917,
"step": 455
},
{
"epoch": 0.09085022662748418,
"grad_norm": 0.7712229490280151,
"learning_rate": 2.0590132565903476e-06,
"loss": 1.7185,
"step": 456
},
{
"epoch": 0.09104945958061464,
"grad_norm": 0.6682973504066467,
"learning_rate": 1.9670969321032407e-06,
"loss": 1.5444,
"step": 457
},
{
"epoch": 0.09124869253374508,
"grad_norm": 0.6766843795776367,
"learning_rate": 1.8772381773176417e-06,
"loss": 1.3107,
"step": 458
},
{
"epoch": 0.09144792548687553,
"grad_norm": 0.773476243019104,
"learning_rate": 1.7894408414835362e-06,
"loss": 1.583,
"step": 459
},
{
"epoch": 0.09164715844000598,
"grad_norm": 0.7078460454940796,
"learning_rate": 1.70370868554659e-06,
"loss": 1.5706,
"step": 460
},
{
"epoch": 0.09184639139313643,
"grad_norm": 0.7150459289550781,
"learning_rate": 1.620045381987012e-06,
"loss": 1.561,
"step": 461
},
{
"epoch": 0.09204562434626687,
"grad_norm": 0.9921646118164062,
"learning_rate": 1.5384545146622852e-06,
"loss": 1.9899,
"step": 462
},
{
"epoch": 0.09224485729939733,
"grad_norm": 0.701726496219635,
"learning_rate": 1.4589395786535953e-06,
"loss": 1.2879,
"step": 463
},
{
"epoch": 0.09244409025252777,
"grad_norm": 0.7816105484962463,
"learning_rate": 1.3815039801161721e-06,
"loss": 1.5822,
"step": 464
},
{
"epoch": 0.09264332320565821,
"grad_norm": 0.7791746258735657,
"learning_rate": 1.3061510361333185e-06,
"loss": 1.6751,
"step": 465
},
{
"epoch": 0.09284255615878867,
"grad_norm": 0.6447046995162964,
"learning_rate": 1.232883974574367e-06,
"loss": 1.3991,
"step": 466
},
{
"epoch": 0.09304178911191911,
"grad_norm": 0.7769410014152527,
"learning_rate": 1.1617059339563807e-06,
"loss": 1.6797,
"step": 467
},
{
"epoch": 0.09324102206504956,
"grad_norm": 0.8174847364425659,
"learning_rate": 1.0926199633097157e-06,
"loss": 1.786,
"step": 468
},
{
"epoch": 0.09344025501818001,
"grad_norm": 0.7846056222915649,
"learning_rate": 1.0256290220474307e-06,
"loss": 1.6683,
"step": 469
},
{
"epoch": 0.09363948797131046,
"grad_norm": 0.8444880843162537,
"learning_rate": 9.607359798384785e-07,
"loss": 1.7476,
"step": 470
},
{
"epoch": 0.0938387209244409,
"grad_norm": 0.7403116822242737,
"learning_rate": 8.979436164848088e-07,
"loss": 1.4758,
"step": 471
},
{
"epoch": 0.09403795387757136,
"grad_norm": 0.7375995516777039,
"learning_rate": 8.372546218022747e-07,
"loss": 1.6429,
"step": 472
},
{
"epoch": 0.0942371868307018,
"grad_norm": 0.8594033718109131,
"learning_rate": 7.786715955054203e-07,
"loss": 1.675,
"step": 473
},
{
"epoch": 0.09443641978383224,
"grad_norm": 0.7836558222770691,
"learning_rate": 7.221970470961125e-07,
"loss": 1.6502,
"step": 474
},
{
"epoch": 0.0946356527369627,
"grad_norm": 0.7155219316482544,
"learning_rate": 6.678333957560512e-07,
"loss": 1.4955,
"step": 475
},
{
"epoch": 0.09483488569009314,
"grad_norm": 0.6998591423034668,
"learning_rate": 6.15582970243117e-07,
"loss": 1.4825,
"step": 476
},
{
"epoch": 0.09503411864322359,
"grad_norm": 0.9176483154296875,
"learning_rate": 5.654480087916303e-07,
"loss": 1.8354,
"step": 477
},
{
"epoch": 0.09523335159635404,
"grad_norm": 0.8327824473381042,
"learning_rate": 5.174306590164879e-07,
"loss": 1.7847,
"step": 478
},
{
"epoch": 0.09543258454948449,
"grad_norm": 0.7299410700798035,
"learning_rate": 4.715329778211375e-07,
"loss": 1.5685,
"step": 479
},
{
"epoch": 0.09563181750261493,
"grad_norm": 0.9764321446418762,
"learning_rate": 4.277569313094809e-07,
"loss": 1.7331,
"step": 480
},
{
"epoch": 0.09583105045574539,
"grad_norm": 0.779161810874939,
"learning_rate": 3.8610439470164737e-07,
"loss": 1.5425,
"step": 481
},
{
"epoch": 0.09603028340887583,
"grad_norm": 1.092887043952942,
"learning_rate": 3.465771522536854e-07,
"loss": 1.5982,
"step": 482
},
{
"epoch": 0.09622951636200627,
"grad_norm": 1.0371736288070679,
"learning_rate": 3.09176897181096e-07,
"loss": 1.7093,
"step": 483
},
{
"epoch": 0.09642874931513673,
"grad_norm": 0.9150737524032593,
"learning_rate": 2.7390523158633554e-07,
"loss": 2.003,
"step": 484
},
{
"epoch": 0.09662798226826717,
"grad_norm": 0.88661789894104,
"learning_rate": 2.407636663901591e-07,
"loss": 1.8276,
"step": 485
},
{
"epoch": 0.09682721522139762,
"grad_norm": 0.8697336316108704,
"learning_rate": 2.0975362126691712e-07,
"loss": 2.0095,
"step": 486
},
{
"epoch": 0.09702644817452807,
"grad_norm": 0.8557798266410828,
"learning_rate": 1.8087642458373134e-07,
"loss": 1.7249,
"step": 487
},
{
"epoch": 0.09722568112765852,
"grad_norm": 0.8817839622497559,
"learning_rate": 1.5413331334360182e-07,
"loss": 1.8848,
"step": 488
},
{
"epoch": 0.09742491408078896,
"grad_norm": 0.8968345522880554,
"learning_rate": 1.2952543313240472e-07,
"loss": 1.9211,
"step": 489
},
{
"epoch": 0.0976241470339194,
"grad_norm": 0.8569303750991821,
"learning_rate": 1.0705383806982606e-07,
"loss": 1.8268,
"step": 490
},
{
"epoch": 0.09782337998704986,
"grad_norm": 0.878320574760437,
"learning_rate": 8.671949076420882e-08,
"loss": 1.7088,
"step": 491
},
{
"epoch": 0.0980226129401803,
"grad_norm": 1.0637781620025635,
"learning_rate": 6.852326227130834e-08,
"loss": 2.1718,
"step": 492
},
{
"epoch": 0.09822184589331075,
"grad_norm": 1.0918681621551514,
"learning_rate": 5.246593205699424e-08,
"loss": 2.0975,
"step": 493
},
{
"epoch": 0.0984210788464412,
"grad_norm": 0.9981033205986023,
"learning_rate": 3.8548187963854956e-08,
"loss": 1.9647,
"step": 494
},
{
"epoch": 0.09862031179957165,
"grad_norm": 0.8573983311653137,
"learning_rate": 2.6770626181715773e-08,
"loss": 1.8779,
"step": 495
},
{
"epoch": 0.09881954475270209,
"grad_norm": 0.9774297475814819,
"learning_rate": 1.7133751222137007e-08,
"loss": 1.9759,
"step": 496
},
{
"epoch": 0.09901877770583255,
"grad_norm": 0.9767397046089172,
"learning_rate": 9.637975896759077e-09,
"loss": 2.1979,
"step": 497
},
{
"epoch": 0.09921801065896299,
"grad_norm": 0.9074400067329407,
"learning_rate": 4.2836212996499865e-09,
"loss": 1.7405,
"step": 498
},
{
"epoch": 0.09941724361209343,
"grad_norm": 0.9676385521888733,
"learning_rate": 1.0709167935385455e-09,
"loss": 2.088,
"step": 499
},
{
"epoch": 0.09961647656522389,
"grad_norm": 1.0142868757247925,
"learning_rate": 0.0,
"loss": 1.9986,
"step": 500
},
{
"epoch": 0.09961647656522389,
"eval_loss": 1.7225215435028076,
"eval_runtime": 1306.9234,
"eval_samples_per_second": 6.469,
"eval_steps_per_second": 1.618,
"step": 500
}
],
"logging_steps": 1,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.1094434267136e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}