Timesformer_BdSLW60 / trainer_state.json
{
"best_metric": 0.6516264428121721,
"best_model_checkpoint": "/media/cse/HDD/Shawon/shawon/Timesformer_BdSLW60/checkpoint-1754",
"epoch": 2.1990867579908677,
"eval_steps": 500,
"global_step": 2190,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0045662100456621,
"grad_norm": 13.168639183044434,
"learning_rate": 2.2831050228310503e-06,
"loss": 4.2832,
"step": 10
},
{
"epoch": 0.0091324200913242,
"grad_norm": 12.616332054138184,
"learning_rate": 4.566210045662101e-06,
"loss": 4.2594,
"step": 20
},
{
"epoch": 0.0136986301369863,
"grad_norm": 12.632943153381348,
"learning_rate": 6.849315068493151e-06,
"loss": 4.1827,
"step": 30
},
{
"epoch": 0.0182648401826484,
"grad_norm": 12.6991548538208,
"learning_rate": 9.132420091324201e-06,
"loss": 4.0751,
"step": 40
},
{
"epoch": 0.0228310502283105,
"grad_norm": 13.425009727478027,
"learning_rate": 1.1415525114155251e-05,
"loss": 4.0899,
"step": 50
},
{
"epoch": 0.0273972602739726,
"grad_norm": 13.551825523376465,
"learning_rate": 1.3698630136986302e-05,
"loss": 3.9588,
"step": 60
},
{
"epoch": 0.0319634703196347,
"grad_norm": 12.522140502929688,
"learning_rate": 1.5981735159817352e-05,
"loss": 3.9251,
"step": 70
},
{
"epoch": 0.0365296803652968,
"grad_norm": 12.543455123901367,
"learning_rate": 1.8264840182648402e-05,
"loss": 3.8922,
"step": 80
},
{
"epoch": 0.0410958904109589,
"grad_norm": 13.488080024719238,
"learning_rate": 2.0547945205479453e-05,
"loss": 4.0068,
"step": 90
},
{
"epoch": 0.045662100456621,
"grad_norm": 12.935967445373535,
"learning_rate": 2.2831050228310503e-05,
"loss": 3.6687,
"step": 100
},
{
"epoch": 0.0502283105022831,
"grad_norm": 12.15580940246582,
"learning_rate": 2.5114155251141553e-05,
"loss": 3.5509,
"step": 110
},
{
"epoch": 0.0547945205479452,
"grad_norm": 13.309331893920898,
"learning_rate": 2.7397260273972603e-05,
"loss": 3.4915,
"step": 120
},
{
"epoch": 0.0593607305936073,
"grad_norm": 12.072916030883789,
"learning_rate": 2.9680365296803654e-05,
"loss": 3.3421,
"step": 130
},
{
"epoch": 0.0639269406392694,
"grad_norm": 13.800947189331055,
"learning_rate": 3.1963470319634704e-05,
"loss": 3.3284,
"step": 140
},
{
"epoch": 0.0684931506849315,
"grad_norm": 14.214157104492188,
"learning_rate": 3.424657534246575e-05,
"loss": 3.1462,
"step": 150
},
{
"epoch": 0.0730593607305936,
"grad_norm": 12.837231636047363,
"learning_rate": 3.6529680365296805e-05,
"loss": 2.9169,
"step": 160
},
{
"epoch": 0.0776255707762557,
"grad_norm": 14.62553882598877,
"learning_rate": 3.881278538812785e-05,
"loss": 2.9004,
"step": 170
},
{
"epoch": 0.0821917808219178,
"grad_norm": 14.03178596496582,
"learning_rate": 4.1095890410958905e-05,
"loss": 2.9808,
"step": 180
},
{
"epoch": 0.0867579908675799,
"grad_norm": 12.56568717956543,
"learning_rate": 4.337899543378995e-05,
"loss": 2.6627,
"step": 190
},
{
"epoch": 0.091324200913242,
"grad_norm": 14.119081497192383,
"learning_rate": 4.5662100456621006e-05,
"loss": 2.4799,
"step": 200
},
{
"epoch": 0.0958904109589041,
"grad_norm": 12.76337718963623,
"learning_rate": 4.794520547945205e-05,
"loss": 2.318,
"step": 210
},
{
"epoch": 0.1004566210045662,
"grad_norm": 12.352985382080078,
"learning_rate": 4.997463216641299e-05,
"loss": 2.0831,
"step": 220
},
{
"epoch": 0.1050228310502283,
"grad_norm": 11.853866577148438,
"learning_rate": 4.9720953830542875e-05,
"loss": 2.4107,
"step": 230
},
{
"epoch": 0.1095890410958904,
"grad_norm": 13.12740421295166,
"learning_rate": 4.9467275494672755e-05,
"loss": 2.0582,
"step": 240
},
{
"epoch": 0.1141552511415525,
"grad_norm": 11.828140258789062,
"learning_rate": 4.9213597158802636e-05,
"loss": 1.8289,
"step": 250
},
{
"epoch": 0.1187214611872146,
"grad_norm": 12.555204391479492,
"learning_rate": 4.895991882293252e-05,
"loss": 1.5664,
"step": 260
},
{
"epoch": 0.1232876712328767,
"grad_norm": 10.697933197021484,
"learning_rate": 4.8706240487062404e-05,
"loss": 1.5136,
"step": 270
},
{
"epoch": 0.1278538812785388,
"grad_norm": 11.606474876403809,
"learning_rate": 4.8452562151192285e-05,
"loss": 1.6229,
"step": 280
},
{
"epoch": 0.1324200913242009,
"grad_norm": 9.681914329528809,
"learning_rate": 4.819888381532217e-05,
"loss": 1.7203,
"step": 290
},
{
"epoch": 0.136986301369863,
"grad_norm": 10.477850914001465,
"learning_rate": 4.794520547945205e-05,
"loss": 1.5071,
"step": 300
},
{
"epoch": 0.1415525114155251,
"grad_norm": 11.697348594665527,
"learning_rate": 4.769152714358194e-05,
"loss": 1.6212,
"step": 310
},
{
"epoch": 0.1461187214611872,
"grad_norm": 11.230805397033691,
"learning_rate": 4.743784880771182e-05,
"loss": 1.3522,
"step": 320
},
{
"epoch": 0.1506849315068493,
"grad_norm": 8.138943672180176,
"learning_rate": 4.71841704718417e-05,
"loss": 1.2338,
"step": 330
},
{
"epoch": 0.1552511415525114,
"grad_norm": 10.334748268127441,
"learning_rate": 4.693049213597159e-05,
"loss": 1.3393,
"step": 340
},
{
"epoch": 0.1598173515981735,
"grad_norm": 9.896265029907227,
"learning_rate": 4.667681380010147e-05,
"loss": 1.261,
"step": 350
},
{
"epoch": 0.1643835616438356,
"grad_norm": 10.67037582397461,
"learning_rate": 4.642313546423136e-05,
"loss": 1.2365,
"step": 360
},
{
"epoch": 0.1689497716894977,
"grad_norm": 7.820501327514648,
"learning_rate": 4.616945712836124e-05,
"loss": 1.0388,
"step": 370
},
{
"epoch": 0.1735159817351598,
"grad_norm": 8.09377670288086,
"learning_rate": 4.591577879249112e-05,
"loss": 1.0919,
"step": 380
},
{
"epoch": 0.1780821917808219,
"grad_norm": 9.110677719116211,
"learning_rate": 4.5662100456621006e-05,
"loss": 1.0683,
"step": 390
},
{
"epoch": 0.182648401826484,
"grad_norm": 11.42436695098877,
"learning_rate": 4.5408422120750886e-05,
"loss": 1.0649,
"step": 400
},
{
"epoch": 0.1872146118721461,
"grad_norm": 9.24187183380127,
"learning_rate": 4.5154743784880774e-05,
"loss": 0.8407,
"step": 410
},
{
"epoch": 0.1917808219178082,
"grad_norm": 8.836689949035645,
"learning_rate": 4.4901065449010655e-05,
"loss": 0.8219,
"step": 420
},
{
"epoch": 0.1963470319634703,
"grad_norm": 13.152447700500488,
"learning_rate": 4.4647387113140535e-05,
"loss": 1.064,
"step": 430
},
{
"epoch": 0.2009132420091324,
"grad_norm": 10.30240249633789,
"learning_rate": 4.439370877727042e-05,
"loss": 0.7285,
"step": 440
},
{
"epoch": 0.2054794520547945,
"grad_norm": 6.908230781555176,
"learning_rate": 4.41400304414003e-05,
"loss": 0.9826,
"step": 450
},
{
"epoch": 0.2100456621004566,
"grad_norm": 6.0176920890808105,
"learning_rate": 4.3886352105530184e-05,
"loss": 0.7598,
"step": 460
},
{
"epoch": 0.2146118721461187,
"grad_norm": 6.647716999053955,
"learning_rate": 4.363267376966007e-05,
"loss": 0.8257,
"step": 470
},
{
"epoch": 0.2191780821917808,
"grad_norm": 6.749544620513916,
"learning_rate": 4.337899543378995e-05,
"loss": 0.8272,
"step": 480
},
{
"epoch": 0.2237442922374429,
"grad_norm": 7.512877464294434,
"learning_rate": 4.312531709791984e-05,
"loss": 0.8007,
"step": 490
},
{
"epoch": 0.228310502283105,
"grad_norm": 6.311679363250732,
"learning_rate": 4.287163876204972e-05,
"loss": 0.7219,
"step": 500
},
{
"epoch": 0.2328767123287671,
"grad_norm": 6.0948591232299805,
"learning_rate": 4.26179604261796e-05,
"loss": 0.5973,
"step": 510
},
{
"epoch": 0.2374429223744292,
"grad_norm": 9.038314819335938,
"learning_rate": 4.236428209030949e-05,
"loss": 0.784,
"step": 520
},
{
"epoch": 0.2420091324200913,
"grad_norm": 5.299352645874023,
"learning_rate": 4.211060375443937e-05,
"loss": 0.5438,
"step": 530
},
{
"epoch": 0.2465753424657534,
"grad_norm": 7.578807353973389,
"learning_rate": 4.1856925418569256e-05,
"loss": 0.7382,
"step": 540
},
{
"epoch": 0.2511415525114155,
"grad_norm": 9.358942985534668,
"learning_rate": 4.160324708269914e-05,
"loss": 0.7584,
"step": 550
},
{
"epoch": 0.2557077625570776,
"grad_norm": 9.18459701538086,
"learning_rate": 4.134956874682902e-05,
"loss": 0.8444,
"step": 560
},
{
"epoch": 0.2602739726027397,
"grad_norm": 5.982091426849365,
"learning_rate": 4.1095890410958905e-05,
"loss": 0.4002,
"step": 570
},
{
"epoch": 0.2648401826484018,
"grad_norm": 4.503027439117432,
"learning_rate": 4.0842212075088786e-05,
"loss": 0.5723,
"step": 580
},
{
"epoch": 0.2694063926940639,
"grad_norm": 7.2720794677734375,
"learning_rate": 4.058853373921867e-05,
"loss": 0.5846,
"step": 590
},
{
"epoch": 0.273972602739726,
"grad_norm": 6.161769390106201,
"learning_rate": 4.0334855403348554e-05,
"loss": 0.5559,
"step": 600
},
{
"epoch": 0.2785388127853881,
"grad_norm": 8.040207862854004,
"learning_rate": 4.0081177067478435e-05,
"loss": 0.5126,
"step": 610
},
{
"epoch": 0.2831050228310502,
"grad_norm": 5.092132568359375,
"learning_rate": 3.982749873160832e-05,
"loss": 0.5201,
"step": 620
},
{
"epoch": 0.2876712328767123,
"grad_norm": 9.829987525939941,
"learning_rate": 3.95738203957382e-05,
"loss": 0.4563,
"step": 630
},
{
"epoch": 0.2922374429223744,
"grad_norm": 7.451713562011719,
"learning_rate": 3.932014205986809e-05,
"loss": 0.7734,
"step": 640
},
{
"epoch": 0.2968036529680365,
"grad_norm": 9.06767463684082,
"learning_rate": 3.906646372399797e-05,
"loss": 0.4621,
"step": 650
},
{
"epoch": 0.3013698630136986,
"grad_norm": 8.293368339538574,
"learning_rate": 3.881278538812785e-05,
"loss": 0.5231,
"step": 660
},
{
"epoch": 0.3059360730593607,
"grad_norm": 9.901214599609375,
"learning_rate": 3.855910705225774e-05,
"loss": 0.6035,
"step": 670
},
{
"epoch": 0.3105022831050228,
"grad_norm": 10.380690574645996,
"learning_rate": 3.830542871638762e-05,
"loss": 0.3312,
"step": 680
},
{
"epoch": 0.3150684931506849,
"grad_norm": 12.21066665649414,
"learning_rate": 3.80517503805175e-05,
"loss": 0.5626,
"step": 690
},
{
"epoch": 0.319634703196347,
"grad_norm": 8.75660514831543,
"learning_rate": 3.779807204464739e-05,
"loss": 0.5877,
"step": 700
},
{
"epoch": 0.3242009132420091,
"grad_norm": 5.634203910827637,
"learning_rate": 3.754439370877727e-05,
"loss": 0.7606,
"step": 710
},
{
"epoch": 0.3287671232876712,
"grad_norm": 4.741566181182861,
"learning_rate": 3.7290715372907156e-05,
"loss": 0.5575,
"step": 720
},
{
"epoch": 0.3333333333333333,
"grad_norm": 5.109163284301758,
"learning_rate": 3.7037037037037037e-05,
"loss": 0.4544,
"step": 730
},
{
"epoch": 0.3378995433789954,
"grad_norm": 10.15185832977295,
"learning_rate": 3.678335870116692e-05,
"loss": 0.408,
"step": 740
},
{
"epoch": 0.3424657534246575,
"grad_norm": 8.459287643432617,
"learning_rate": 3.6529680365296805e-05,
"loss": 0.518,
"step": 750
},
{
"epoch": 0.3470319634703196,
"grad_norm": 4.05381441116333,
"learning_rate": 3.6276002029426685e-05,
"loss": 0.5425,
"step": 760
},
{
"epoch": 0.3515981735159817,
"grad_norm": 7.6227335929870605,
"learning_rate": 3.602232369355657e-05,
"loss": 0.4797,
"step": 770
},
{
"epoch": 0.3561643835616438,
"grad_norm": 4.937866687774658,
"learning_rate": 3.5768645357686453e-05,
"loss": 0.447,
"step": 780
},
{
"epoch": 0.3607305936073059,
"grad_norm": 6.663622856140137,
"learning_rate": 3.5514967021816334e-05,
"loss": 0.6484,
"step": 790
},
{
"epoch": 0.365296803652968,
"grad_norm": 2.132852792739868,
"learning_rate": 3.526128868594622e-05,
"loss": 0.5992,
"step": 800
},
{
"epoch": 0.3698630136986301,
"grad_norm": 3.264267921447754,
"learning_rate": 3.50076103500761e-05,
"loss": 0.3265,
"step": 810
},
{
"epoch": 0.3744292237442922,
"grad_norm": 1.495285153388977,
"learning_rate": 3.475393201420599e-05,
"loss": 0.2921,
"step": 820
},
{
"epoch": 0.3789954337899543,
"grad_norm": 1.2416194677352905,
"learning_rate": 3.450025367833587e-05,
"loss": 0.3851,
"step": 830
},
{
"epoch": 0.3835616438356164,
"grad_norm": 6.346595287322998,
"learning_rate": 3.424657534246575e-05,
"loss": 0.3337,
"step": 840
},
{
"epoch": 0.3881278538812785,
"grad_norm": 3.725011110305786,
"learning_rate": 3.399289700659564e-05,
"loss": 0.3301,
"step": 850
},
{
"epoch": 0.3926940639269406,
"grad_norm": 5.670325756072998,
"learning_rate": 3.373921867072552e-05,
"loss": 0.499,
"step": 860
},
{
"epoch": 0.3972602739726027,
"grad_norm": 7.0639424324035645,
"learning_rate": 3.34855403348554e-05,
"loss": 0.3419,
"step": 870
},
{
"epoch": 0.4004566210045662,
"eval_accuracy": 0.5540398740818469,
"eval_loss": 1.5632456541061401,
"eval_runtime": 168.0602,
"eval_samples_per_second": 5.671,
"eval_steps_per_second": 2.838,
"step": 877
},
{
"epoch": 1.0013698630136987,
"grad_norm": 2.454108953475952,
"learning_rate": 3.323186199898529e-05,
"loss": 0.334,
"step": 880
},
{
"epoch": 1.0059360730593607,
"grad_norm": 12.557369232177734,
"learning_rate": 3.297818366311517e-05,
"loss": 0.2568,
"step": 890
},
{
"epoch": 1.0105022831050228,
"grad_norm": 4.680070400238037,
"learning_rate": 3.2724505327245055e-05,
"loss": 0.2696,
"step": 900
},
{
"epoch": 1.015068493150685,
"grad_norm": 2.2027194499969482,
"learning_rate": 3.2470826991374936e-05,
"loss": 0.4776,
"step": 910
},
{
"epoch": 1.019634703196347,
"grad_norm": 8.404707908630371,
"learning_rate": 3.221714865550482e-05,
"loss": 0.2588,
"step": 920
},
{
"epoch": 1.0242009132420091,
"grad_norm": 1.3184863328933716,
"learning_rate": 3.1963470319634704e-05,
"loss": 0.2601,
"step": 930
},
{
"epoch": 1.0287671232876712,
"grad_norm": 3.3989500999450684,
"learning_rate": 3.1709791983764585e-05,
"loss": 0.3734,
"step": 940
},
{
"epoch": 1.0333333333333334,
"grad_norm": 6.5863142013549805,
"learning_rate": 3.145611364789447e-05,
"loss": 0.4799,
"step": 950
},
{
"epoch": 1.0378995433789955,
"grad_norm": 5.605319023132324,
"learning_rate": 3.120243531202435e-05,
"loss": 0.2629,
"step": 960
},
{
"epoch": 1.0424657534246575,
"grad_norm": 5.677365779876709,
"learning_rate": 3.0948756976154234e-05,
"loss": 0.4172,
"step": 970
},
{
"epoch": 1.0470319634703196,
"grad_norm": 6.943356037139893,
"learning_rate": 3.069507864028412e-05,
"loss": 0.4699,
"step": 980
},
{
"epoch": 1.0515981735159818,
"grad_norm": 1.7512027025222778,
"learning_rate": 3.0441400304414e-05,
"loss": 0.1905,
"step": 990
},
{
"epoch": 1.0561643835616439,
"grad_norm": 3.9878628253936768,
"learning_rate": 3.0187721968543886e-05,
"loss": 0.4574,
"step": 1000
},
{
"epoch": 1.060730593607306,
"grad_norm": 1.668309211730957,
"learning_rate": 2.993404363267377e-05,
"loss": 0.3441,
"step": 1010
},
{
"epoch": 1.065296803652968,
"grad_norm": 14.312126159667969,
"learning_rate": 2.9680365296803654e-05,
"loss": 0.4114,
"step": 1020
},
{
"epoch": 1.0698630136986302,
"grad_norm": 2.793670654296875,
"learning_rate": 2.9426686960933534e-05,
"loss": 0.292,
"step": 1030
},
{
"epoch": 1.0744292237442923,
"grad_norm": 0.9054046273231506,
"learning_rate": 2.917300862506342e-05,
"loss": 0.2573,
"step": 1040
},
{
"epoch": 1.0789954337899543,
"grad_norm": 3.951117515563965,
"learning_rate": 2.8919330289193303e-05,
"loss": 0.4457,
"step": 1050
},
{
"epoch": 1.0835616438356164,
"grad_norm": 10.71568775177002,
"learning_rate": 2.8665651953323187e-05,
"loss": 0.2332,
"step": 1060
},
{
"epoch": 1.0881278538812786,
"grad_norm": 5.735054969787598,
"learning_rate": 2.841197361745307e-05,
"loss": 0.2876,
"step": 1070
},
{
"epoch": 1.0926940639269407,
"grad_norm": 5.521303176879883,
"learning_rate": 2.815829528158295e-05,
"loss": 0.3894,
"step": 1080
},
{
"epoch": 1.0972602739726027,
"grad_norm": 10.683832168579102,
"learning_rate": 2.7904616945712835e-05,
"loss": 0.3966,
"step": 1090
},
{
"epoch": 1.1018264840182648,
"grad_norm": 1.0494991540908813,
"learning_rate": 2.765093860984272e-05,
"loss": 0.3233,
"step": 1100
},
{
"epoch": 1.106392694063927,
"grad_norm": 4.736073017120361,
"learning_rate": 2.7397260273972603e-05,
"loss": 0.3742,
"step": 1110
},
{
"epoch": 1.110958904109589,
"grad_norm": 8.027792930603027,
"learning_rate": 2.7143581938102484e-05,
"loss": 0.3813,
"step": 1120
},
{
"epoch": 1.1155251141552511,
"grad_norm": 2.795949935913086,
"learning_rate": 2.6889903602232368e-05,
"loss": 0.2194,
"step": 1130
},
{
"epoch": 1.1200913242009132,
"grad_norm": 16.23459815979004,
"learning_rate": 2.6636225266362252e-05,
"loss": 0.4115,
"step": 1140
},
{
"epoch": 1.1246575342465754,
"grad_norm": 8.320725440979004,
"learning_rate": 2.6382546930492136e-05,
"loss": 0.2755,
"step": 1150
},
{
"epoch": 1.1292237442922375,
"grad_norm": 2.192859649658203,
"learning_rate": 2.612886859462202e-05,
"loss": 0.2373,
"step": 1160
},
{
"epoch": 1.1337899543378995,
"grad_norm": 6.194462299346924,
"learning_rate": 2.58751902587519e-05,
"loss": 0.2048,
"step": 1170
},
{
"epoch": 1.1383561643835616,
"grad_norm": 6.522688388824463,
"learning_rate": 2.5621511922881785e-05,
"loss": 0.2106,
"step": 1180
},
{
"epoch": 1.1429223744292236,
"grad_norm": 7.976909160614014,
"learning_rate": 2.536783358701167e-05,
"loss": 0.1977,
"step": 1190
},
{
"epoch": 1.1474885844748859,
"grad_norm": 10.373821258544922,
"learning_rate": 2.5114155251141553e-05,
"loss": 0.4609,
"step": 1200
},
{
"epoch": 1.152054794520548,
"grad_norm": 8.701196670532227,
"learning_rate": 2.4860476915271437e-05,
"loss": 0.3093,
"step": 1210
},
{
"epoch": 1.15662100456621,
"grad_norm": 0.8994976878166199,
"learning_rate": 2.4606798579401318e-05,
"loss": 0.1242,
"step": 1220
},
{
"epoch": 1.1611872146118722,
"grad_norm": 4.23283576965332,
"learning_rate": 2.4353120243531202e-05,
"loss": 0.1627,
"step": 1230
},
{
"epoch": 1.1657534246575343,
"grad_norm": 4.330513000488281,
"learning_rate": 2.4099441907661086e-05,
"loss": 0.3118,
"step": 1240
},
{
"epoch": 1.1703196347031963,
"grad_norm": 2.304389715194702,
"learning_rate": 2.384576357179097e-05,
"loss": 0.218,
"step": 1250
},
{
"epoch": 1.1748858447488584,
"grad_norm": 6.340790271759033,
"learning_rate": 2.359208523592085e-05,
"loss": 0.2833,
"step": 1260
},
{
"epoch": 1.1794520547945206,
"grad_norm": 2.6649558544158936,
"learning_rate": 2.3338406900050735e-05,
"loss": 0.2335,
"step": 1270
},
{
"epoch": 1.1840182648401827,
"grad_norm": 0.8412348628044128,
"learning_rate": 2.308472856418062e-05,
"loss": 0.1889,
"step": 1280
},
{
"epoch": 1.1885844748858447,
"grad_norm": 5.932245254516602,
"learning_rate": 2.2831050228310503e-05,
"loss": 0.2006,
"step": 1290
},
{
"epoch": 1.1931506849315068,
"grad_norm": 9.092105865478516,
"learning_rate": 2.2577371892440387e-05,
"loss": 0.3411,
"step": 1300
},
{
"epoch": 1.197716894977169,
"grad_norm": 6.397707939147949,
"learning_rate": 2.2323693556570268e-05,
"loss": 0.1739,
"step": 1310
},
{
"epoch": 1.202283105022831,
"grad_norm": 3.637505292892456,
"learning_rate": 2.207001522070015e-05,
"loss": 0.2351,
"step": 1320
},
{
"epoch": 1.2068493150684931,
"grad_norm": 3.8445942401885986,
"learning_rate": 2.1816336884830036e-05,
"loss": 0.3909,
"step": 1330
},
{
"epoch": 1.2114155251141552,
"grad_norm": 1.331699252128601,
"learning_rate": 2.156265854895992e-05,
"loss": 0.154,
"step": 1340
},
{
"epoch": 1.2159817351598174,
"grad_norm": 9.704405784606934,
"learning_rate": 2.13089802130898e-05,
"loss": 0.1631,
"step": 1350
},
{
"epoch": 1.2205479452054795,
"grad_norm": 2.271298885345459,
"learning_rate": 2.1055301877219685e-05,
"loss": 0.1551,
"step": 1360
},
{
"epoch": 1.2251141552511415,
"grad_norm": 10.894644737243652,
"learning_rate": 2.080162354134957e-05,
"loss": 0.3072,
"step": 1370
},
{
"epoch": 1.2296803652968036,
"grad_norm": 10.65208625793457,
"learning_rate": 2.0547945205479453e-05,
"loss": 0.2142,
"step": 1380
},
{
"epoch": 1.2342465753424658,
"grad_norm": 15.530343055725098,
"learning_rate": 2.0294266869609337e-05,
"loss": 0.2107,
"step": 1390
},
{
"epoch": 1.238812785388128,
"grad_norm": 7.166269779205322,
"learning_rate": 2.0040588533739217e-05,
"loss": 0.2599,
"step": 1400
},
{
"epoch": 1.24337899543379,
"grad_norm": 1.9615888595581055,
"learning_rate": 1.97869101978691e-05,
"loss": 0.1511,
"step": 1410
},
{
"epoch": 1.247945205479452,
"grad_norm": 6.840981960296631,
"learning_rate": 1.9533231861998985e-05,
"loss": 0.3887,
"step": 1420
},
{
"epoch": 1.252511415525114,
"grad_norm": 5.138172626495361,
"learning_rate": 1.927955352612887e-05,
"loss": 0.2284,
"step": 1430
},
{
"epoch": 1.2570776255707763,
"grad_norm": 5.770719051361084,
"learning_rate": 1.902587519025875e-05,
"loss": 0.3119,
"step": 1440
},
{
"epoch": 1.2616438356164383,
"grad_norm": 1.5914487838745117,
"learning_rate": 1.8772196854388634e-05,
"loss": 0.0873,
"step": 1450
},
{
"epoch": 1.2662100456621004,
"grad_norm": 2.5335633754730225,
"learning_rate": 1.8518518518518518e-05,
"loss": 0.2629,
"step": 1460
},
{
"epoch": 1.2707762557077626,
"grad_norm": 10.850088119506836,
"learning_rate": 1.8264840182648402e-05,
"loss": 0.3098,
"step": 1470
},
{
"epoch": 1.2753424657534247,
"grad_norm": 2.9598915576934814,
"learning_rate": 1.8011161846778286e-05,
"loss": 0.0882,
"step": 1480
},
{
"epoch": 1.2799086757990867,
"grad_norm": 2.021665096282959,
"learning_rate": 1.7757483510908167e-05,
"loss": 0.2079,
"step": 1490
},
{
"epoch": 1.2844748858447488,
"grad_norm": 6.0909833908081055,
"learning_rate": 1.750380517503805e-05,
"loss": 0.1287,
"step": 1500
},
{
"epoch": 1.2890410958904108,
"grad_norm": 7.84592866897583,
"learning_rate": 1.7250126839167935e-05,
"loss": 0.1406,
"step": 1510
},
{
"epoch": 1.293607305936073,
"grad_norm": 3.269815444946289,
"learning_rate": 1.699644850329782e-05,
"loss": 0.2997,
"step": 1520
},
{
"epoch": 1.2981735159817351,
"grad_norm": 0.7504135966300964,
"learning_rate": 1.67427701674277e-05,
"loss": 0.1724,
"step": 1530
},
{
"epoch": 1.3027397260273972,
"grad_norm": 2.8693363666534424,
"learning_rate": 1.6489091831557584e-05,
"loss": 0.1543,
"step": 1540
},
{
"epoch": 1.3073059360730594,
"grad_norm": 6.736435890197754,
"learning_rate": 1.6235413495687468e-05,
"loss": 0.1396,
"step": 1550
},
{
"epoch": 1.3118721461187215,
"grad_norm": 0.3472974896430969,
"learning_rate": 1.5981735159817352e-05,
"loss": 0.2981,
"step": 1560
},
{
"epoch": 1.3164383561643835,
"grad_norm": 4.580450057983398,
"learning_rate": 1.5728056823947236e-05,
"loss": 0.2362,
"step": 1570
},
{
"epoch": 1.3210045662100456,
"grad_norm": 1.7592848539352417,
"learning_rate": 1.5474378488077117e-05,
"loss": 0.2429,
"step": 1580
},
{
"epoch": 1.3255707762557076,
"grad_norm": 1.0239814519882202,
"learning_rate": 1.5220700152207e-05,
"loss": 0.1309,
"step": 1590
},
{
"epoch": 1.33013698630137,
"grad_norm": 1.68032705783844,
"learning_rate": 1.4967021816336885e-05,
"loss": 0.2172,
"step": 1600
},
{
"epoch": 1.334703196347032,
"grad_norm": 5.960744380950928,
"learning_rate": 1.4713343480466767e-05,
"loss": 0.2709,
"step": 1610
},
{
"epoch": 1.339269406392694,
"grad_norm": 9.478919982910156,
"learning_rate": 1.4459665144596651e-05,
"loss": 0.2803,
"step": 1620
},
{
"epoch": 1.3438356164383563,
"grad_norm": 0.2718620002269745,
"learning_rate": 1.4205986808726535e-05,
"loss": 0.1481,
"step": 1630
},
{
"epoch": 1.3484018264840183,
"grad_norm": 0.5140727162361145,
"learning_rate": 1.3952308472856418e-05,
"loss": 0.1621,
"step": 1640
},
{
"epoch": 1.3529680365296803,
"grad_norm": 8.044154167175293,
"learning_rate": 1.3698630136986302e-05,
"loss": 0.2557,
"step": 1650
},
{
"epoch": 1.3575342465753424,
"grad_norm": 0.42045390605926514,
"learning_rate": 1.3444951801116184e-05,
"loss": 0.1946,
"step": 1660
},
{
"epoch": 1.3621004566210044,
"grad_norm": 1.3292018175125122,
"learning_rate": 1.3191273465246068e-05,
"loss": 0.1813,
"step": 1670
},
{
"epoch": 1.3666666666666667,
"grad_norm": 0.8583564758300781,
"learning_rate": 1.293759512937595e-05,
"loss": 0.1753,
"step": 1680
},
{
"epoch": 1.3712328767123287,
"grad_norm": 5.7116546630859375,
"learning_rate": 1.2683916793505835e-05,
"loss": 0.2727,
"step": 1690
},
{
"epoch": 1.3757990867579908,
"grad_norm": 0.348532497882843,
"learning_rate": 1.2430238457635719e-05,
"loss": 0.1961,
"step": 1700
},
{
"epoch": 1.380365296803653,
"grad_norm": 9.204560279846191,
"learning_rate": 1.2176560121765601e-05,
"loss": 0.257,
"step": 1710
},
{
"epoch": 1.384931506849315,
"grad_norm": 5.398329257965088,
"learning_rate": 1.1922881785895485e-05,
"loss": 0.1348,
"step": 1720
},
{
"epoch": 1.3894977168949771,
"grad_norm": 0.43983083963394165,
"learning_rate": 1.1669203450025367e-05,
"loss": 0.2507,
"step": 1730
},
{
"epoch": 1.3940639269406392,
"grad_norm": 0.9157253503799438,
"learning_rate": 1.1415525114155251e-05,
"loss": 0.193,
"step": 1740
},
{
"epoch": 1.3986301369863012,
"grad_norm": 0.855165421962738,
"learning_rate": 1.1161846778285134e-05,
"loss": 0.1291,
"step": 1750
},
{
"epoch": 1.4004566210045661,
"eval_accuracy": 0.6516264428121721,
"eval_loss": 1.277839183807373,
"eval_runtime": 178.2589,
"eval_samples_per_second": 5.346,
"eval_steps_per_second": 2.676,
"step": 1754
},
{
"epoch": 2.0027397260273974,
"grad_norm": 8.00808334350586,
"learning_rate": 1.0908168442415018e-05,
"loss": 0.1597,
"step": 1760
},
{
"epoch": 2.007305936073059,
"grad_norm": 0.1681767702102661,
"learning_rate": 1.06544901065449e-05,
"loss": 0.0838,
"step": 1770
},
{
"epoch": 2.0118721461187214,
"grad_norm": 12.16120719909668,
"learning_rate": 1.0400811770674784e-05,
"loss": 0.1621,
"step": 1780
},
{
"epoch": 2.0164383561643837,
"grad_norm": 1.8593238592147827,
"learning_rate": 1.0147133434804668e-05,
"loss": 0.0698,
"step": 1790
},
{
"epoch": 2.0210045662100455,
"grad_norm": 1.4579780101776123,
"learning_rate": 9.89345509893455e-06,
"loss": 0.2562,
"step": 1800
},
{
"epoch": 2.025570776255708,
"grad_norm": 2.344836711883545,
"learning_rate": 9.639776763064435e-06,
"loss": 0.257,
"step": 1810
},
{
"epoch": 2.03013698630137,
"grad_norm": 8.14854621887207,
"learning_rate": 9.386098427194317e-06,
"loss": 0.2149,
"step": 1820
},
{
"epoch": 2.034703196347032,
"grad_norm": 11.254741668701172,
"learning_rate": 9.132420091324201e-06,
"loss": 0.2175,
"step": 1830
},
{
"epoch": 2.039269406392694,
"grad_norm": 8.984929084777832,
"learning_rate": 8.878741755454084e-06,
"loss": 0.1927,
"step": 1840
},
{
"epoch": 2.043835616438356,
"grad_norm": 13.033331871032715,
"learning_rate": 8.625063419583968e-06,
"loss": 0.2202,
"step": 1850
},
{
"epoch": 2.0484018264840183,
"grad_norm": 1.0010125637054443,
"learning_rate": 8.37138508371385e-06,
"loss": 0.1063,
"step": 1860
},
{
"epoch": 2.0529680365296805,
"grad_norm": 0.2957708239555359,
"learning_rate": 8.117706747843734e-06,
"loss": 0.1509,
"step": 1870
},
{
"epoch": 2.0575342465753423,
"grad_norm": 0.20592088997364044,
"learning_rate": 7.864028411973618e-06,
"loss": 0.1519,
"step": 1880
},
{
"epoch": 2.0621004566210046,
"grad_norm": 2.5575947761535645,
"learning_rate": 7.6103500761035e-06,
"loss": 0.1487,
"step": 1890
},
{
"epoch": 2.066666666666667,
"grad_norm": 3.2097675800323486,
"learning_rate": 7.356671740233384e-06,
"loss": 0.1506,
"step": 1900
},
{
"epoch": 2.0712328767123287,
"grad_norm": 7.255710124969482,
"learning_rate": 7.102993404363268e-06,
"loss": 0.2091,
"step": 1910
},
{
"epoch": 2.075799086757991,
"grad_norm": 0.6733071804046631,
"learning_rate": 6.849315068493151e-06,
"loss": 0.1179,
"step": 1920
},
{
"epoch": 2.080365296803653,
"grad_norm": 5.307835102081299,
"learning_rate": 6.595636732623034e-06,
"loss": 0.2213,
"step": 1930
},
{
"epoch": 2.084931506849315,
"grad_norm": 2.307908535003662,
"learning_rate": 6.341958396752917e-06,
"loss": 0.0695,
"step": 1940
},
{
"epoch": 2.0894977168949773,
"grad_norm": 9.304193496704102,
"learning_rate": 6.0882800608828005e-06,
"loss": 0.13,
"step": 1950
},
{
"epoch": 2.094063926940639,
"grad_norm": 0.5198363661766052,
"learning_rate": 5.834601725012684e-06,
"loss": 0.1376,
"step": 1960
},
{
"epoch": 2.0986301369863014,
"grad_norm": 4.95096492767334,
"learning_rate": 5.580923389142567e-06,
"loss": 0.1322,
"step": 1970
},
{
"epoch": 2.1031963470319637,
"grad_norm": 4.969466686248779,
"learning_rate": 5.32724505327245e-06,
"loss": 0.1839,
"step": 1980
},
{
"epoch": 2.1077625570776255,
"grad_norm": 0.14146043360233307,
"learning_rate": 5.073566717402334e-06,
"loss": 0.2181,
"step": 1990
},
{
"epoch": 2.1123287671232878,
"grad_norm": 0.31362447142601013,
"learning_rate": 4.819888381532217e-06,
"loss": 0.2004,
"step": 2000
},
{
"epoch": 2.1168949771689496,
"grad_norm": 9.072457313537598,
"learning_rate": 4.566210045662101e-06,
"loss": 0.158,
"step": 2010
},
{
"epoch": 2.121461187214612,
"grad_norm": 6.152866363525391,
"learning_rate": 4.312531709791984e-06,
"loss": 0.1529,
"step": 2020
},
{
"epoch": 2.126027397260274,
"grad_norm": 2.435375452041626,
"learning_rate": 4.058853373921867e-06,
"loss": 0.172,
"step": 2030
},
{
"epoch": 2.130593607305936,
"grad_norm": 7.807186603546143,
"learning_rate": 3.80517503805175e-06,
"loss": 0.1801,
"step": 2040
},
{
"epoch": 2.135159817351598,
"grad_norm": 2.6992197036743164,
"learning_rate": 3.551496702181634e-06,
"loss": 0.1777,
"step": 2050
},
{
"epoch": 2.1397260273972605,
"grad_norm": 9.397564888000488,
"learning_rate": 3.297818366311517e-06,
"loss": 0.0939,
"step": 2060
},
{
"epoch": 2.1442922374429223,
"grad_norm": 3.4112205505371094,
"learning_rate": 3.0441400304414002e-06,
"loss": 0.1201,
"step": 2070
},
{
"epoch": 2.1488584474885846,
"grad_norm": 0.36758846044540405,
"learning_rate": 2.7904616945712835e-06,
"loss": 0.167,
"step": 2080
},
{
"epoch": 2.1534246575342464,
"grad_norm": 0.20398499071598053,
"learning_rate": 2.536783358701167e-06,
"loss": 0.0453,
"step": 2090
},
{
"epoch": 2.1579908675799087,
"grad_norm": 0.14668098092079163,
"learning_rate": 2.2831050228310503e-06,
"loss": 0.0674,
"step": 2100
},
{
"epoch": 2.162557077625571,
"grad_norm": 3.2575571537017822,
"learning_rate": 2.0294266869609335e-06,
"loss": 0.0917,
"step": 2110
},
{
"epoch": 2.1671232876712327,
"grad_norm": 0.5656366348266602,
"learning_rate": 1.775748351090817e-06,
"loss": 0.1194,
"step": 2120
},
{
"epoch": 2.171689497716895,
"grad_norm": 7.743866443634033,
"learning_rate": 1.5220700152207001e-06,
"loss": 0.1199,
"step": 2130
},
{
"epoch": 2.1762557077625573,
"grad_norm": 1.3446861505508423,
"learning_rate": 1.2683916793505835e-06,
"loss": 0.0521,
"step": 2140
},
{
"epoch": 2.180821917808219,
"grad_norm": 0.33492910861968994,
"learning_rate": 1.0147133434804667e-06,
"loss": 0.0444,
"step": 2150
},
{
"epoch": 2.1853881278538814,
"grad_norm": 0.12891508638858795,
"learning_rate": 7.610350076103501e-07,
"loss": 0.1172,
"step": 2160
},
{
"epoch": 2.189954337899543,
"grad_norm": 5.329843521118164,
"learning_rate": 5.073566717402334e-07,
"loss": 0.1813,
"step": 2170
},
{
"epoch": 2.1945205479452055,
"grad_norm": 2.9594337940216064,
"learning_rate": 2.536783358701167e-07,
"loss": 0.2105,
"step": 2180
},
{
"epoch": 2.1990867579908677,
"grad_norm": 1.7173917293548584,
"learning_rate": 0.0,
"loss": 0.183,
"step": 2190
},
{
"epoch": 2.1990867579908677,
"eval_accuracy": 0.6295907660020986,
"eval_loss": 1.3431484699249268,
"eval_runtime": 185.0677,
"eval_samples_per_second": 5.149,
"eval_steps_per_second": 2.577,
"step": 2190
},
{
"epoch": 2.1990867579908677,
"step": 2190,
"total_flos": 1.5353993990024331e+19,
"train_loss": 0.7283184543456117,
"train_runtime": 5503.9878,
"train_samples_per_second": 3.183,
"train_steps_per_second": 0.398
},
{
"epoch": 2.1990867579908677,
"eval_accuracy": 0.6516264428121721,
"eval_loss": 1.277839183807373,
"eval_runtime": 188.0153,
"eval_samples_per_second": 5.069,
"eval_steps_per_second": 2.537,
"step": 2190
},
{
"epoch": 2.1990867579908677,
"eval_accuracy": 0.7749803304484658,
"eval_loss": 0.9055941104888916,
"eval_runtime": 256.8953,
"eval_samples_per_second": 4.948,
"eval_steps_per_second": 2.476,
"step": 2190
}
],
"logging_steps": 10,
"max_steps": 2190,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 2,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.5353993990024331e+19,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
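The log above can be summarized programmatically. Below is a minimal sketch, assuming the file has been downloaded locally as trainer_state.json (the path and the printed fields are illustrative; they are not part of the training run itself). It separates the per-step training losses from the evaluation entries in log_history and reports the best metric recorded by the Trainer.

import json

# Load the trainer state written by the Hugging Face Trainer.
# "trainer_state.json" is an assumed local path for this sketch.
with open("trainer_state.json", "r") as f:
    state = json.load(f)

# Training entries carry a "loss" key; evaluation entries carry "eval_accuracy".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_accuracy" in e]

print(f"best_metric (eval_accuracy): {state['best_metric']:.4f}")
print(f"best checkpoint: {state['best_model_checkpoint']}")
print(f"final logged training loss: {train_logs[-1]['loss']}")

# One line per evaluation pass, in the order it was logged.
for e in eval_logs:
    print(f"step {e['step']:>5}  epoch {e['epoch']:.2f}  "
          f"eval_accuracy {e['eval_accuracy']:.4f}  eval_loss {e['eval_loss']:.4f}")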