{
"best_metric": 0.8324354290962219,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.013036534889026497,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 6.518267444513249e-05,
"grad_norm": 4.084769248962402,
"learning_rate": 1.004e-05,
"loss": 1.0714,
"step": 1
},
{
"epoch": 6.518267444513249e-05,
"eval_loss": 3.5102760791778564,
"eval_runtime": 744.1991,
"eval_samples_per_second": 8.68,
"eval_steps_per_second": 2.17,
"step": 1
},
{
"epoch": 0.00013036534889026498,
"grad_norm": 5.269199371337891,
"learning_rate": 2.008e-05,
"loss": 1.5721,
"step": 2
},
{
"epoch": 0.00019554802333539745,
"grad_norm": 5.509669303894043,
"learning_rate": 3.012e-05,
"loss": 1.0653,
"step": 3
},
{
"epoch": 0.00026073069778052995,
"grad_norm": 5.992783546447754,
"learning_rate": 4.016e-05,
"loss": 1.1367,
"step": 4
},
{
"epoch": 0.0003259133722256624,
"grad_norm": 5.95001745223999,
"learning_rate": 5.02e-05,
"loss": 1.4664,
"step": 5
},
{
"epoch": 0.0003910960466707949,
"grad_norm": 6.181279182434082,
"learning_rate": 6.024e-05,
"loss": 1.5197,
"step": 6
},
{
"epoch": 0.0004562787211159274,
"grad_norm": 5.237460613250732,
"learning_rate": 7.028e-05,
"loss": 1.4659,
"step": 7
},
{
"epoch": 0.0005214613955610599,
"grad_norm": 7.470530033111572,
"learning_rate": 8.032e-05,
"loss": 2.5472,
"step": 8
},
{
"epoch": 0.0005866440700061923,
"grad_norm": 7.4010443687438965,
"learning_rate": 9.036000000000001e-05,
"loss": 1.8851,
"step": 9
},
{
"epoch": 0.0006518267444513248,
"grad_norm": 8.14273452758789,
"learning_rate": 0.0001004,
"loss": 2.5444,
"step": 10
},
{
"epoch": 0.0007170094188964573,
"grad_norm": 5.8861165046691895,
"learning_rate": 9.987157894736842e-05,
"loss": 1.9468,
"step": 11
},
{
"epoch": 0.0007821920933415898,
"grad_norm": 5.406398296356201,
"learning_rate": 9.934315789473684e-05,
"loss": 1.0875,
"step": 12
},
{
"epoch": 0.0008473747677867222,
"grad_norm": 6.182877540588379,
"learning_rate": 9.881473684210525e-05,
"loss": 1.5979,
"step": 13
},
{
"epoch": 0.0009125574422318548,
"grad_norm": 8.538721084594727,
"learning_rate": 9.828631578947369e-05,
"loss": 1.8065,
"step": 14
},
{
"epoch": 0.0009777401166769872,
"grad_norm": 5.54311466217041,
"learning_rate": 9.77578947368421e-05,
"loss": 1.0368,
"step": 15
},
{
"epoch": 0.0010429227911221198,
"grad_norm": 11.207380294799805,
"learning_rate": 9.722947368421052e-05,
"loss": 1.5687,
"step": 16
},
{
"epoch": 0.0011081054655672521,
"grad_norm": 6.547550201416016,
"learning_rate": 9.670105263157895e-05,
"loss": 1.0427,
"step": 17
},
{
"epoch": 0.0011732881400123847,
"grad_norm": 5.821341037750244,
"learning_rate": 9.617263157894737e-05,
"loss": 1.2702,
"step": 18
},
{
"epoch": 0.0012384708144575172,
"grad_norm": 8.170310974121094,
"learning_rate": 9.564421052631579e-05,
"loss": 1.4842,
"step": 19
},
{
"epoch": 0.0013036534889026496,
"grad_norm": 7.119532585144043,
"learning_rate": 9.511578947368421e-05,
"loss": 1.2944,
"step": 20
},
{
"epoch": 0.0013688361633477821,
"grad_norm": 6.549607276916504,
"learning_rate": 9.458736842105264e-05,
"loss": 1.1745,
"step": 21
},
{
"epoch": 0.0014340188377929147,
"grad_norm": 5.023319244384766,
"learning_rate": 9.405894736842106e-05,
"loss": 0.7572,
"step": 22
},
{
"epoch": 0.001499201512238047,
"grad_norm": 6.467738628387451,
"learning_rate": 9.353052631578947e-05,
"loss": 1.0398,
"step": 23
},
{
"epoch": 0.0015643841866831796,
"grad_norm": 5.155070781707764,
"learning_rate": 9.300210526315789e-05,
"loss": 0.6913,
"step": 24
},
{
"epoch": 0.0016295668611283121,
"grad_norm": 5.410099506378174,
"learning_rate": 9.247368421052631e-05,
"loss": 1.116,
"step": 25
},
{
"epoch": 0.0016947495355734445,
"grad_norm": 9.872712135314941,
"learning_rate": 9.194526315789473e-05,
"loss": 1.8271,
"step": 26
},
{
"epoch": 0.001759932210018577,
"grad_norm": 8.195897102355957,
"learning_rate": 9.141684210526316e-05,
"loss": 1.082,
"step": 27
},
{
"epoch": 0.0018251148844637096,
"grad_norm": 7.203644275665283,
"learning_rate": 9.088842105263158e-05,
"loss": 1.1428,
"step": 28
},
{
"epoch": 0.001890297558908842,
"grad_norm": 9.383421897888184,
"learning_rate": 9.036000000000001e-05,
"loss": 0.9886,
"step": 29
},
{
"epoch": 0.0019554802333539745,
"grad_norm": 4.832867622375488,
"learning_rate": 8.983157894736843e-05,
"loss": 1.019,
"step": 30
},
{
"epoch": 0.002020662907799107,
"grad_norm": 8.182337760925293,
"learning_rate": 8.930315789473684e-05,
"loss": 1.0352,
"step": 31
},
{
"epoch": 0.0020858455822442396,
"grad_norm": 3.0797736644744873,
"learning_rate": 8.877473684210526e-05,
"loss": 0.6499,
"step": 32
},
{
"epoch": 0.002151028256689372,
"grad_norm": 4.614433765411377,
"learning_rate": 8.824631578947368e-05,
"loss": 1.3117,
"step": 33
},
{
"epoch": 0.0022162109311345043,
"grad_norm": 4.8648457527160645,
"learning_rate": 8.771789473684211e-05,
"loss": 0.9818,
"step": 34
},
{
"epoch": 0.002281393605579637,
"grad_norm": 5.053056716918945,
"learning_rate": 8.718947368421053e-05,
"loss": 1.0728,
"step": 35
},
{
"epoch": 0.0023465762800247694,
"grad_norm": 5.258237838745117,
"learning_rate": 8.666105263157895e-05,
"loss": 1.3109,
"step": 36
},
{
"epoch": 0.002411758954469902,
"grad_norm": 6.49020528793335,
"learning_rate": 8.613263157894737e-05,
"loss": 1.5304,
"step": 37
},
{
"epoch": 0.0024769416289150345,
"grad_norm": 5.650185585021973,
"learning_rate": 8.560421052631578e-05,
"loss": 1.5754,
"step": 38
},
{
"epoch": 0.002542124303360167,
"grad_norm": 4.012632846832275,
"learning_rate": 8.50757894736842e-05,
"loss": 1.0375,
"step": 39
},
{
"epoch": 0.002607306977805299,
"grad_norm": 4.553121089935303,
"learning_rate": 8.454736842105263e-05,
"loss": 1.0791,
"step": 40
},
{
"epoch": 0.0026724896522504317,
"grad_norm": 4.67976188659668,
"learning_rate": 8.401894736842106e-05,
"loss": 0.9995,
"step": 41
},
{
"epoch": 0.0027376723266955643,
"grad_norm": 5.818678855895996,
"learning_rate": 8.349052631578948e-05,
"loss": 0.9256,
"step": 42
},
{
"epoch": 0.002802855001140697,
"grad_norm": 5.402655601501465,
"learning_rate": 8.29621052631579e-05,
"loss": 1.1302,
"step": 43
},
{
"epoch": 0.0028680376755858294,
"grad_norm": 10.834538459777832,
"learning_rate": 8.243368421052632e-05,
"loss": 0.818,
"step": 44
},
{
"epoch": 0.002933220350030962,
"grad_norm": 4.870290279388428,
"learning_rate": 8.190526315789474e-05,
"loss": 0.9021,
"step": 45
},
{
"epoch": 0.002998403024476094,
"grad_norm": 4.933202266693115,
"learning_rate": 8.137684210526315e-05,
"loss": 0.801,
"step": 46
},
{
"epoch": 0.0030635856989212266,
"grad_norm": 6.761687278747559,
"learning_rate": 8.084842105263157e-05,
"loss": 1.3199,
"step": 47
},
{
"epoch": 0.003128768373366359,
"grad_norm": 6.689528465270996,
"learning_rate": 8.032e-05,
"loss": 1.3152,
"step": 48
},
{
"epoch": 0.0031939510478114917,
"grad_norm": 5.506531238555908,
"learning_rate": 7.979157894736842e-05,
"loss": 1.2228,
"step": 49
},
{
"epoch": 0.0032591337222566243,
"grad_norm": 5.423097133636475,
"learning_rate": 7.926315789473684e-05,
"loss": 1.4467,
"step": 50
},
{
"epoch": 0.0032591337222566243,
"eval_loss": 0.9954817891120911,
"eval_runtime": 745.98,
"eval_samples_per_second": 8.66,
"eval_steps_per_second": 2.165,
"step": 50
},
{
"epoch": 0.003324316396701757,
"grad_norm": 3.4636971950531006,
"learning_rate": 7.873473684210526e-05,
"loss": 0.6352,
"step": 51
},
{
"epoch": 0.003389499071146889,
"grad_norm": 3.1654837131500244,
"learning_rate": 7.820631578947369e-05,
"loss": 0.8974,
"step": 52
},
{
"epoch": 0.0034546817455920215,
"grad_norm": 2.099487781524658,
"learning_rate": 7.76778947368421e-05,
"loss": 0.2316,
"step": 53
},
{
"epoch": 0.003519864420037154,
"grad_norm": 1.574611783027649,
"learning_rate": 7.714947368421052e-05,
"loss": 0.2663,
"step": 54
},
{
"epoch": 0.0035850470944822866,
"grad_norm": 2.4194858074188232,
"learning_rate": 7.662105263157896e-05,
"loss": 0.491,
"step": 55
},
{
"epoch": 0.003650229768927419,
"grad_norm": 1.953523874282837,
"learning_rate": 7.609263157894737e-05,
"loss": 0.449,
"step": 56
},
{
"epoch": 0.0037154124433725517,
"grad_norm": 2.2616214752197266,
"learning_rate": 7.556421052631579e-05,
"loss": 0.3948,
"step": 57
},
{
"epoch": 0.003780595117817684,
"grad_norm": 2.246051788330078,
"learning_rate": 7.503578947368421e-05,
"loss": 0.5058,
"step": 58
},
{
"epoch": 0.0038457777922628164,
"grad_norm": 2.9115405082702637,
"learning_rate": 7.450736842105263e-05,
"loss": 0.7811,
"step": 59
},
{
"epoch": 0.003910960466707949,
"grad_norm": 4.887603759765625,
"learning_rate": 7.397894736842105e-05,
"loss": 0.7756,
"step": 60
},
{
"epoch": 0.003976143141153081,
"grad_norm": 2.9186997413635254,
"learning_rate": 7.345052631578948e-05,
"loss": 0.4695,
"step": 61
},
{
"epoch": 0.004041325815598214,
"grad_norm": 4.976742267608643,
"learning_rate": 7.29221052631579e-05,
"loss": 0.868,
"step": 62
},
{
"epoch": 0.004106508490043346,
"grad_norm": 4.973067760467529,
"learning_rate": 7.239368421052631e-05,
"loss": 1.181,
"step": 63
},
{
"epoch": 0.004171691164488479,
"grad_norm": 5.87078857421875,
"learning_rate": 7.186526315789474e-05,
"loss": 1.1077,
"step": 64
},
{
"epoch": 0.004236873838933611,
"grad_norm": 3.100754737854004,
"learning_rate": 7.133684210526316e-05,
"loss": 0.5465,
"step": 65
},
{
"epoch": 0.004302056513378744,
"grad_norm": 6.342639446258545,
"learning_rate": 7.080842105263158e-05,
"loss": 1.0568,
"step": 66
},
{
"epoch": 0.0043672391878238764,
"grad_norm": 3.8607728481292725,
"learning_rate": 7.028e-05,
"loss": 1.054,
"step": 67
},
{
"epoch": 0.0044324218622690086,
"grad_norm": 4.340756893157959,
"learning_rate": 6.975157894736843e-05,
"loss": 1.2158,
"step": 68
},
{
"epoch": 0.0044976045367141415,
"grad_norm": 2.7851240634918213,
"learning_rate": 6.922315789473685e-05,
"loss": 0.6975,
"step": 69
},
{
"epoch": 0.004562787211159274,
"grad_norm": 19.89763832092285,
"learning_rate": 6.869473684210527e-05,
"loss": 1.2703,
"step": 70
},
{
"epoch": 0.004627969885604407,
"grad_norm": 5.404323101043701,
"learning_rate": 6.816631578947368e-05,
"loss": 0.9856,
"step": 71
},
{
"epoch": 0.004693152560049539,
"grad_norm": 5.077646255493164,
"learning_rate": 6.76378947368421e-05,
"loss": 1.2272,
"step": 72
},
{
"epoch": 0.004758335234494671,
"grad_norm": 11.702156066894531,
"learning_rate": 6.710947368421052e-05,
"loss": 1.2462,
"step": 73
},
{
"epoch": 0.004823517908939804,
"grad_norm": 1.9628050327301025,
"learning_rate": 6.658105263157894e-05,
"loss": 0.5371,
"step": 74
},
{
"epoch": 0.004888700583384936,
"grad_norm": 4.088584899902344,
"learning_rate": 6.605263157894737e-05,
"loss": 0.9884,
"step": 75
},
{
"epoch": 0.004953883257830069,
"grad_norm": 4.946320533752441,
"learning_rate": 6.55242105263158e-05,
"loss": 1.036,
"step": 76
},
{
"epoch": 0.005019065932275201,
"grad_norm": 3.6775155067443848,
"learning_rate": 6.499578947368422e-05,
"loss": 1.1745,
"step": 77
},
{
"epoch": 0.005084248606720334,
"grad_norm": 3.1447501182556152,
"learning_rate": 6.446736842105264e-05,
"loss": 0.7307,
"step": 78
},
{
"epoch": 0.005149431281165466,
"grad_norm": 3.2460031509399414,
"learning_rate": 6.393894736842105e-05,
"loss": 0.9647,
"step": 79
},
{
"epoch": 0.005214613955610598,
"grad_norm": 7.355772972106934,
"learning_rate": 6.341052631578947e-05,
"loss": 1.0611,
"step": 80
},
{
"epoch": 0.005279796630055731,
"grad_norm": 6.00462007522583,
"learning_rate": 6.288210526315789e-05,
"loss": 1.0856,
"step": 81
},
{
"epoch": 0.0053449793045008635,
"grad_norm": 4.158636569976807,
"learning_rate": 6.235368421052632e-05,
"loss": 0.9878,
"step": 82
},
{
"epoch": 0.0054101619789459965,
"grad_norm": 4.67047643661499,
"learning_rate": 6.182526315789474e-05,
"loss": 1.1027,
"step": 83
},
{
"epoch": 0.005475344653391129,
"grad_norm": 4.211904525756836,
"learning_rate": 6.129684210526316e-05,
"loss": 0.9119,
"step": 84
},
{
"epoch": 0.005540527327836261,
"grad_norm": 4.3183064460754395,
"learning_rate": 6.076842105263158e-05,
"loss": 0.991,
"step": 85
},
{
"epoch": 0.005605710002281394,
"grad_norm": 3.8753435611724854,
"learning_rate": 6.024e-05,
"loss": 0.9348,
"step": 86
},
{
"epoch": 0.005670892676726526,
"grad_norm": 5.450557231903076,
"learning_rate": 5.971157894736842e-05,
"loss": 1.4856,
"step": 87
},
{
"epoch": 0.005736075351171659,
"grad_norm": 5.696898460388184,
"learning_rate": 5.9183157894736835e-05,
"loss": 1.1666,
"step": 88
},
{
"epoch": 0.005801258025616791,
"grad_norm": 3.665428876876831,
"learning_rate": 5.8654736842105267e-05,
"loss": 0.7272,
"step": 89
},
{
"epoch": 0.005866440700061924,
"grad_norm": 7.14247989654541,
"learning_rate": 5.8126315789473684e-05,
"loss": 1.0759,
"step": 90
},
{
"epoch": 0.005931623374507056,
"grad_norm": 4.450038909912109,
"learning_rate": 5.759789473684211e-05,
"loss": 1.0025,
"step": 91
},
{
"epoch": 0.005996806048952188,
"grad_norm": 3.9177703857421875,
"learning_rate": 5.706947368421053e-05,
"loss": 0.7917,
"step": 92
},
{
"epoch": 0.006061988723397321,
"grad_norm": 5.90571403503418,
"learning_rate": 5.6541052631578945e-05,
"loss": 1.0844,
"step": 93
},
{
"epoch": 0.006127171397842453,
"grad_norm": 7.018702983856201,
"learning_rate": 5.601263157894736e-05,
"loss": 1.0232,
"step": 94
},
{
"epoch": 0.006192354072287586,
"grad_norm": 3.4453823566436768,
"learning_rate": 5.5484210526315794e-05,
"loss": 0.7434,
"step": 95
},
{
"epoch": 0.006257536746732718,
"grad_norm": 6.936615943908691,
"learning_rate": 5.495578947368421e-05,
"loss": 1.3415,
"step": 96
},
{
"epoch": 0.006322719421177851,
"grad_norm": 11.391502380371094,
"learning_rate": 5.442736842105264e-05,
"loss": 1.4938,
"step": 97
},
{
"epoch": 0.0063879020956229835,
"grad_norm": 3.610074520111084,
"learning_rate": 5.3898947368421055e-05,
"loss": 0.9637,
"step": 98
},
{
"epoch": 0.006453084770068116,
"grad_norm": 6.011956214904785,
"learning_rate": 5.337052631578947e-05,
"loss": 1.492,
"step": 99
},
{
"epoch": 0.006518267444513249,
"grad_norm": 6.378303050994873,
"learning_rate": 5.284210526315789e-05,
"loss": 1.1898,
"step": 100
},
{
"epoch": 0.006518267444513249,
"eval_loss": 0.9247763156890869,
"eval_runtime": 746.0854,
"eval_samples_per_second": 8.659,
"eval_steps_per_second": 2.165,
"step": 100
},
{
"epoch": 0.006583450118958381,
"grad_norm": 2.645019769668579,
"learning_rate": 5.231368421052631e-05,
"loss": 0.6677,
"step": 101
},
{
"epoch": 0.006648632793403514,
"grad_norm": 2.862882375717163,
"learning_rate": 5.178526315789474e-05,
"loss": 0.7984,
"step": 102
},
{
"epoch": 0.006713815467848646,
"grad_norm": 2.1093852519989014,
"learning_rate": 5.1256842105263165e-05,
"loss": 0.5116,
"step": 103
},
{
"epoch": 0.006778998142293778,
"grad_norm": 1.6436916589736938,
"learning_rate": 5.072842105263158e-05,
"loss": 0.3384,
"step": 104
},
{
"epoch": 0.006844180816738911,
"grad_norm": 1.8502347469329834,
"learning_rate": 5.02e-05,
"loss": 0.3001,
"step": 105
},
{
"epoch": 0.006909363491184043,
"grad_norm": 3.300266742706299,
"learning_rate": 4.967157894736842e-05,
"loss": 0.3394,
"step": 106
},
{
"epoch": 0.006974546165629176,
"grad_norm": 3.8908684253692627,
"learning_rate": 4.914315789473684e-05,
"loss": 0.8438,
"step": 107
},
{
"epoch": 0.007039728840074308,
"grad_norm": 2.5882935523986816,
"learning_rate": 4.861473684210526e-05,
"loss": 0.764,
"step": 108
},
{
"epoch": 0.007104911514519441,
"grad_norm": 2.4956729412078857,
"learning_rate": 4.8086315789473686e-05,
"loss": 0.5656,
"step": 109
},
{
"epoch": 0.007170094188964573,
"grad_norm": 3.178035020828247,
"learning_rate": 4.7557894736842104e-05,
"loss": 0.5182,
"step": 110
},
{
"epoch": 0.007235276863409705,
"grad_norm": 2.6953673362731934,
"learning_rate": 4.702947368421053e-05,
"loss": 0.7,
"step": 111
},
{
"epoch": 0.007300459537854838,
"grad_norm": 4.535560607910156,
"learning_rate": 4.6501052631578946e-05,
"loss": 0.6026,
"step": 112
},
{
"epoch": 0.0073656422122999705,
"grad_norm": 3.3130948543548584,
"learning_rate": 4.5972631578947364e-05,
"loss": 0.6538,
"step": 113
},
{
"epoch": 0.0074308248867451035,
"grad_norm": 5.011913776397705,
"learning_rate": 4.544421052631579e-05,
"loss": 1.1627,
"step": 114
},
{
"epoch": 0.007496007561190236,
"grad_norm": 2.643364906311035,
"learning_rate": 4.4915789473684213e-05,
"loss": 0.6814,
"step": 115
},
{
"epoch": 0.007561190235635368,
"grad_norm": 4.945113182067871,
"learning_rate": 4.438736842105263e-05,
"loss": 1.4463,
"step": 116
},
{
"epoch": 0.007626372910080501,
"grad_norm": 4.396795749664307,
"learning_rate": 4.3858947368421056e-05,
"loss": 1.09,
"step": 117
},
{
"epoch": 0.007691555584525633,
"grad_norm": 4.34941291809082,
"learning_rate": 4.3330526315789474e-05,
"loss": 0.6612,
"step": 118
},
{
"epoch": 0.007756738258970766,
"grad_norm": 4.358571529388428,
"learning_rate": 4.280210526315789e-05,
"loss": 1.0953,
"step": 119
},
{
"epoch": 0.007821920933415898,
"grad_norm": 4.529819488525391,
"learning_rate": 4.2273684210526317e-05,
"loss": 1.2583,
"step": 120
},
{
"epoch": 0.00788710360786103,
"grad_norm": 3.2337148189544678,
"learning_rate": 4.174526315789474e-05,
"loss": 1.2295,
"step": 121
},
{
"epoch": 0.007952286282306162,
"grad_norm": 3.514124631881714,
"learning_rate": 4.121684210526316e-05,
"loss": 0.7983,
"step": 122
},
{
"epoch": 0.008017468956751296,
"grad_norm": 4.527546405792236,
"learning_rate": 4.068842105263158e-05,
"loss": 1.1304,
"step": 123
},
{
"epoch": 0.008082651631196428,
"grad_norm": 3.7314906120300293,
"learning_rate": 4.016e-05,
"loss": 0.9276,
"step": 124
},
{
"epoch": 0.00814783430564156,
"grad_norm": 3.9742982387542725,
"learning_rate": 3.963157894736842e-05,
"loss": 1.1314,
"step": 125
},
{
"epoch": 0.008213016980086692,
"grad_norm": 3.210330009460449,
"learning_rate": 3.9103157894736844e-05,
"loss": 0.8935,
"step": 126
},
{
"epoch": 0.008278199654531826,
"grad_norm": 2.126352310180664,
"learning_rate": 3.857473684210526e-05,
"loss": 0.5475,
"step": 127
},
{
"epoch": 0.008343382328976958,
"grad_norm": 4.862457275390625,
"learning_rate": 3.804631578947369e-05,
"loss": 1.1201,
"step": 128
},
{
"epoch": 0.00840856500342209,
"grad_norm": 4.386620044708252,
"learning_rate": 3.7517894736842105e-05,
"loss": 1.243,
"step": 129
},
{
"epoch": 0.008473747677867223,
"grad_norm": 5.318674564361572,
"learning_rate": 3.698947368421052e-05,
"loss": 1.3332,
"step": 130
},
{
"epoch": 0.008538930352312355,
"grad_norm": 5.400907039642334,
"learning_rate": 3.646105263157895e-05,
"loss": 1.4768,
"step": 131
},
{
"epoch": 0.008604113026757489,
"grad_norm": 3.384183406829834,
"learning_rate": 3.593263157894737e-05,
"loss": 1.0143,
"step": 132
},
{
"epoch": 0.00866929570120262,
"grad_norm": 3.478654384613037,
"learning_rate": 3.540421052631579e-05,
"loss": 0.8405,
"step": 133
},
{
"epoch": 0.008734478375647753,
"grad_norm": 5.532976150512695,
"learning_rate": 3.4875789473684215e-05,
"loss": 1.3522,
"step": 134
},
{
"epoch": 0.008799661050092885,
"grad_norm": 3.775825023651123,
"learning_rate": 3.434736842105263e-05,
"loss": 0.9939,
"step": 135
},
{
"epoch": 0.008864843724538017,
"grad_norm": 4.642473220825195,
"learning_rate": 3.381894736842105e-05,
"loss": 0.7214,
"step": 136
},
{
"epoch": 0.008930026398983151,
"grad_norm": 27.070499420166016,
"learning_rate": 3.329052631578947e-05,
"loss": 1.0064,
"step": 137
},
{
"epoch": 0.008995209073428283,
"grad_norm": 4.223586559295654,
"learning_rate": 3.27621052631579e-05,
"loss": 1.1072,
"step": 138
},
{
"epoch": 0.009060391747873415,
"grad_norm": 2.780446767807007,
"learning_rate": 3.223368421052632e-05,
"loss": 0.7691,
"step": 139
},
{
"epoch": 0.009125574422318547,
"grad_norm": 3.2398338317871094,
"learning_rate": 3.1705263157894736e-05,
"loss": 0.7892,
"step": 140
},
{
"epoch": 0.00919075709676368,
"grad_norm": 2.2731664180755615,
"learning_rate": 3.117684210526316e-05,
"loss": 0.581,
"step": 141
},
{
"epoch": 0.009255939771208813,
"grad_norm": 4.545046329498291,
"learning_rate": 3.064842105263158e-05,
"loss": 0.9304,
"step": 142
},
{
"epoch": 0.009321122445653945,
"grad_norm": 3.062983512878418,
"learning_rate": 3.012e-05,
"loss": 0.7803,
"step": 143
},
{
"epoch": 0.009386305120099078,
"grad_norm": 3.362913131713867,
"learning_rate": 2.9591578947368418e-05,
"loss": 0.8837,
"step": 144
},
{
"epoch": 0.00945148779454421,
"grad_norm": 4.0667877197265625,
"learning_rate": 2.9063157894736842e-05,
"loss": 0.8816,
"step": 145
},
{
"epoch": 0.009516670468989342,
"grad_norm": 3.24399733543396,
"learning_rate": 2.8534736842105264e-05,
"loss": 0.6107,
"step": 146
},
{
"epoch": 0.009581853143434476,
"grad_norm": 4.025416374206543,
"learning_rate": 2.800631578947368e-05,
"loss": 0.8663,
"step": 147
},
{
"epoch": 0.009647035817879608,
"grad_norm": 3.7384579181671143,
"learning_rate": 2.7477894736842106e-05,
"loss": 0.9184,
"step": 148
},
{
"epoch": 0.00971221849232474,
"grad_norm": 4.581092834472656,
"learning_rate": 2.6949473684210527e-05,
"loss": 1.0749,
"step": 149
},
{
"epoch": 0.009777401166769872,
"grad_norm": 7.011988162994385,
"learning_rate": 2.6421052631578945e-05,
"loss": 1.1314,
"step": 150
},
{
"epoch": 0.009777401166769872,
"eval_loss": 0.8550050854682922,
"eval_runtime": 746.3587,
"eval_samples_per_second": 8.655,
"eval_steps_per_second": 2.164,
"step": 150
},
{
"epoch": 0.009842583841215006,
"grad_norm": 2.339585304260254,
"learning_rate": 2.589263157894737e-05,
"loss": 0.7291,
"step": 151
},
{
"epoch": 0.009907766515660138,
"grad_norm": 2.318110942840576,
"learning_rate": 2.536421052631579e-05,
"loss": 0.5631,
"step": 152
},
{
"epoch": 0.00997294919010527,
"grad_norm": 1.509214997291565,
"learning_rate": 2.483578947368421e-05,
"loss": 0.2818,
"step": 153
},
{
"epoch": 0.010038131864550402,
"grad_norm": 1.4966317415237427,
"learning_rate": 2.430736842105263e-05,
"loss": 0.3039,
"step": 154
},
{
"epoch": 0.010103314538995534,
"grad_norm": 1.6486977338790894,
"learning_rate": 2.3778947368421052e-05,
"loss": 0.2881,
"step": 155
},
{
"epoch": 0.010168497213440668,
"grad_norm": 1.94291090965271,
"learning_rate": 2.3250526315789473e-05,
"loss": 0.3744,
"step": 156
},
{
"epoch": 0.0102336798878858,
"grad_norm": 2.5989363193511963,
"learning_rate": 2.2722105263157894e-05,
"loss": 0.7534,
"step": 157
},
{
"epoch": 0.010298862562330932,
"grad_norm": 2.6126620769500732,
"learning_rate": 2.2193684210526316e-05,
"loss": 0.7143,
"step": 158
},
{
"epoch": 0.010364045236776065,
"grad_norm": 3.0555806159973145,
"learning_rate": 2.1665263157894737e-05,
"loss": 0.8661,
"step": 159
},
{
"epoch": 0.010429227911221197,
"grad_norm": 2.486401319503784,
"learning_rate": 2.1136842105263158e-05,
"loss": 0.7079,
"step": 160
},
{
"epoch": 0.01049441058566633,
"grad_norm": 2.7490785121917725,
"learning_rate": 2.060842105263158e-05,
"loss": 0.7556,
"step": 161
},
{
"epoch": 0.010559593260111463,
"grad_norm": 3.5909423828125,
"learning_rate": 2.008e-05,
"loss": 0.8089,
"step": 162
},
{
"epoch": 0.010624775934556595,
"grad_norm": 2.8206937313079834,
"learning_rate": 1.9551578947368422e-05,
"loss": 0.5847,
"step": 163
},
{
"epoch": 0.010689958609001727,
"grad_norm": 4.731099605560303,
"learning_rate": 1.9023157894736843e-05,
"loss": 1.1395,
"step": 164
},
{
"epoch": 0.010755141283446859,
"grad_norm": 3.4779961109161377,
"learning_rate": 1.849473684210526e-05,
"loss": 0.8823,
"step": 165
},
{
"epoch": 0.010820323957891993,
"grad_norm": 4.600359916687012,
"learning_rate": 1.7966315789473686e-05,
"loss": 0.9135,
"step": 166
},
{
"epoch": 0.010885506632337125,
"grad_norm": 4.061426162719727,
"learning_rate": 1.7437894736842107e-05,
"loss": 1.0511,
"step": 167
},
{
"epoch": 0.010950689306782257,
"grad_norm": 4.084474563598633,
"learning_rate": 1.6909473684210525e-05,
"loss": 1.023,
"step": 168
},
{
"epoch": 0.01101587198122739,
"grad_norm": 4.1737213134765625,
"learning_rate": 1.638105263157895e-05,
"loss": 0.6724,
"step": 169
},
{
"epoch": 0.011081054655672521,
"grad_norm": 4.323464393615723,
"learning_rate": 1.5852631578947368e-05,
"loss": 0.7298,
"step": 170
},
{
"epoch": 0.011146237330117655,
"grad_norm": 3.48018217086792,
"learning_rate": 1.532421052631579e-05,
"loss": 0.9651,
"step": 171
},
{
"epoch": 0.011211420004562787,
"grad_norm": 3.573929786682129,
"learning_rate": 1.4795789473684209e-05,
"loss": 0.9659,
"step": 172
},
{
"epoch": 0.01127660267900792,
"grad_norm": 2.933013677597046,
"learning_rate": 1.4267368421052632e-05,
"loss": 0.6419,
"step": 173
},
{
"epoch": 0.011341785353453052,
"grad_norm": 2.557969808578491,
"learning_rate": 1.3738947368421053e-05,
"loss": 0.4679,
"step": 174
},
{
"epoch": 0.011406968027898185,
"grad_norm": 4.5971360206604,
"learning_rate": 1.3210526315789473e-05,
"loss": 0.8361,
"step": 175
},
{
"epoch": 0.011472150702343318,
"grad_norm": 2.6714653968811035,
"learning_rate": 1.2682105263157896e-05,
"loss": 0.7694,
"step": 176
},
{
"epoch": 0.01153733337678845,
"grad_norm": 2.248840570449829,
"learning_rate": 1.2153684210526315e-05,
"loss": 0.5984,
"step": 177
},
{
"epoch": 0.011602516051233582,
"grad_norm": 3.2296483516693115,
"learning_rate": 1.1625263157894737e-05,
"loss": 0.9471,
"step": 178
},
{
"epoch": 0.011667698725678714,
"grad_norm": 2.628018856048584,
"learning_rate": 1.1096842105263158e-05,
"loss": 0.6132,
"step": 179
},
{
"epoch": 0.011732881400123848,
"grad_norm": 2.5440778732299805,
"learning_rate": 1.0568421052631579e-05,
"loss": 0.4686,
"step": 180
},
{
"epoch": 0.01179806407456898,
"grad_norm": 5.853560447692871,
"learning_rate": 1.004e-05,
"loss": 0.9851,
"step": 181
},
{
"epoch": 0.011863246749014112,
"grad_norm": 3.293943405151367,
"learning_rate": 9.511578947368422e-06,
"loss": 0.8526,
"step": 182
},
{
"epoch": 0.011928429423459244,
"grad_norm": 4.2562761306762695,
"learning_rate": 8.983157894736843e-06,
"loss": 1.2255,
"step": 183
},
{
"epoch": 0.011993612097904376,
"grad_norm": 2.7283856868743896,
"learning_rate": 8.454736842105263e-06,
"loss": 0.7548,
"step": 184
},
{
"epoch": 0.01205879477234951,
"grad_norm": 3.0538840293884277,
"learning_rate": 7.926315789473684e-06,
"loss": 0.9394,
"step": 185
},
{
"epoch": 0.012123977446794642,
"grad_norm": 3.4769504070281982,
"learning_rate": 7.397894736842104e-06,
"loss": 0.7831,
"step": 186
},
{
"epoch": 0.012189160121239774,
"grad_norm": 4.124552249908447,
"learning_rate": 6.8694736842105265e-06,
"loss": 1.0468,
"step": 187
},
{
"epoch": 0.012254342795684907,
"grad_norm": 5.11678409576416,
"learning_rate": 6.341052631578948e-06,
"loss": 1.5542,
"step": 188
},
{
"epoch": 0.012319525470130039,
"grad_norm": 5.838722229003906,
"learning_rate": 5.812631578947368e-06,
"loss": 0.9434,
"step": 189
},
{
"epoch": 0.012384708144575172,
"grad_norm": 3.940918445587158,
"learning_rate": 5.2842105263157896e-06,
"loss": 1.0149,
"step": 190
},
{
"epoch": 0.012449890819020305,
"grad_norm": 3.6323320865631104,
"learning_rate": 4.755789473684211e-06,
"loss": 1.1067,
"step": 191
},
{
"epoch": 0.012515073493465437,
"grad_norm": 3.954714775085449,
"learning_rate": 4.227368421052631e-06,
"loss": 0.8935,
"step": 192
},
{
"epoch": 0.012580256167910569,
"grad_norm": 4.876105785369873,
"learning_rate": 3.698947368421052e-06,
"loss": 1.1635,
"step": 193
},
{
"epoch": 0.012645438842355703,
"grad_norm": 3.779630661010742,
"learning_rate": 3.170526315789474e-06,
"loss": 1.0547,
"step": 194
},
{
"epoch": 0.012710621516800835,
"grad_norm": 6.356569290161133,
"learning_rate": 2.6421052631578948e-06,
"loss": 1.4103,
"step": 195
},
{
"epoch": 0.012775804191245967,
"grad_norm": 5.530601978302002,
"learning_rate": 2.1136842105263157e-06,
"loss": 1.4511,
"step": 196
},
{
"epoch": 0.012840986865691099,
"grad_norm": 7.286373138427734,
"learning_rate": 1.585263157894737e-06,
"loss": 0.8836,
"step": 197
},
{
"epoch": 0.012906169540136231,
"grad_norm": 3.2843291759490967,
"learning_rate": 1.0568421052631578e-06,
"loss": 0.8748,
"step": 198
},
{
"epoch": 0.012971352214581365,
"grad_norm": 5.327868938446045,
"learning_rate": 5.284210526315789e-07,
"loss": 0.8412,
"step": 199
},
{
"epoch": 0.013036534889026497,
"grad_norm": 5.048506259918213,
"learning_rate": 0.0,
"loss": 1.0769,
"step": 200
},
{
"epoch": 0.013036534889026497,
"eval_loss": 0.8324354290962219,
"eval_runtime": 746.1953,
"eval_samples_per_second": 8.657,
"eval_steps_per_second": 2.164,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.844270609478451e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}