{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 1665,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0,
"eval_f1": 0.6562770562770562,
"eval_loss": 0.7476533651351929,
"eval_precision": 0.48976954555244456,
"eval_recall": 0.9943156974202011,
"eval_runtime": 16.4688,
"eval_samples_per_second": 283.749,
"eval_steps_per_second": 35.522,
"step": 0
},
{
"epoch": 0.018026137899954935,
"grad_norm": 14.20566463470459,
"learning_rate": 4.981949458483755e-05,
"loss": 0.6876,
"step": 10
},
{
"epoch": 0.03605227579990987,
"grad_norm": 27.79998016357422,
"learning_rate": 4.963898916967509e-05,
"loss": 0.5988,
"step": 20
},
{
"epoch": 0.054078413699864804,
"grad_norm": 39.63682556152344,
"learning_rate": 4.945848375451264e-05,
"loss": 0.5677,
"step": 30
},
{
"epoch": 0.07210455159981974,
"grad_norm": 38.49867630004883,
"learning_rate": 4.927797833935018e-05,
"loss": 0.5922,
"step": 40
},
{
"epoch": 0.09013068949977468,
"grad_norm": 52.28614044189453,
"learning_rate": 4.909747292418773e-05,
"loss": 0.5506,
"step": 50
},
{
"epoch": 0.10815682739972961,
"grad_norm": 29.180490493774414,
"learning_rate": 4.891696750902527e-05,
"loss": 0.5173,
"step": 60
},
{
"epoch": 0.12618296529968454,
"grad_norm": 33.55237579345703,
"learning_rate": 4.873646209386282e-05,
"loss": 0.4931,
"step": 70
},
{
"epoch": 0.14420910319963948,
"grad_norm": 32.063228607177734,
"learning_rate": 4.855595667870036e-05,
"loss": 0.483,
"step": 80
},
{
"epoch": 0.16223524109959442,
"grad_norm": 24.234926223754883,
"learning_rate": 4.837545126353791e-05,
"loss": 0.47,
"step": 90
},
{
"epoch": 0.18026137899954936,
"grad_norm": 33.66775894165039,
"learning_rate": 4.819494584837546e-05,
"loss": 0.4564,
"step": 100
},
{
"epoch": 0.19828751689950427,
"grad_norm": 53.322574615478516,
"learning_rate": 4.8014440433213e-05,
"loss": 0.5083,
"step": 110
},
{
"epoch": 0.21631365479945922,
"grad_norm": 29.443470001220703,
"learning_rate": 4.783393501805055e-05,
"loss": 0.463,
"step": 120
},
{
"epoch": 0.23433979269941416,
"grad_norm": 33.13877487182617,
"learning_rate": 4.765342960288809e-05,
"loss": 0.4626,
"step": 130
},
{
"epoch": 0.25236593059936907,
"grad_norm": 25.509536743164062,
"learning_rate": 4.747292418772563e-05,
"loss": 0.5113,
"step": 140
},
{
"epoch": 0.270392068499324,
"grad_norm": 21.015031814575195,
"learning_rate": 4.7292418772563177e-05,
"loss": 0.4611,
"step": 150
},
{
"epoch": 0.28841820639927895,
"grad_norm": 28.079387664794922,
"learning_rate": 4.711191335740072e-05,
"loss": 0.4783,
"step": 160
},
{
"epoch": 0.3064443442992339,
"grad_norm": 19.978599548339844,
"learning_rate": 4.693140794223827e-05,
"loss": 0.5026,
"step": 170
},
{
"epoch": 0.32447048219918884,
"grad_norm": 29.316265106201172,
"learning_rate": 4.675090252707581e-05,
"loss": 0.4906,
"step": 180
},
{
"epoch": 0.3424966200991438,
"grad_norm": 18.79282569885254,
"learning_rate": 4.657039711191336e-05,
"loss": 0.5016,
"step": 190
},
{
"epoch": 0.3605227579990987,
"grad_norm": 18.66801643371582,
"learning_rate": 4.63898916967509e-05,
"loss": 0.4502,
"step": 200
},
{
"epoch": 0.3785488958990536,
"grad_norm": 27.929332733154297,
"learning_rate": 4.620938628158845e-05,
"loss": 0.4302,
"step": 210
},
{
"epoch": 0.39657503379900855,
"grad_norm": 20.541879653930664,
"learning_rate": 4.602888086642599e-05,
"loss": 0.4496,
"step": 220
},
{
"epoch": 0.4146011716989635,
"grad_norm": 19.034873962402344,
"learning_rate": 4.584837545126354e-05,
"loss": 0.4541,
"step": 230
},
{
"epoch": 0.43262730959891843,
"grad_norm": 32.077945709228516,
"learning_rate": 4.566787003610109e-05,
"loss": 0.4562,
"step": 240
},
{
"epoch": 0.45065344749887337,
"grad_norm": 25.475997924804688,
"learning_rate": 4.548736462093863e-05,
"loss": 0.4456,
"step": 250
},
{
"epoch": 0.4686795853988283,
"grad_norm": 17.132720947265625,
"learning_rate": 4.530685920577618e-05,
"loss": 0.4629,
"step": 260
},
{
"epoch": 0.48670572329878325,
"grad_norm": 22.313941955566406,
"learning_rate": 4.5126353790613716e-05,
"loss": 0.4444,
"step": 270
},
{
"epoch": 0.5047318611987381,
"grad_norm": 22.373477935791016,
"learning_rate": 4.494584837545127e-05,
"loss": 0.4404,
"step": 280
},
{
"epoch": 0.5227579990986931,
"grad_norm": 16.72430992126465,
"learning_rate": 4.4765342960288806e-05,
"loss": 0.4611,
"step": 290
},
{
"epoch": 0.540784136998648,
"grad_norm": 27.037927627563477,
"learning_rate": 4.458483754512636e-05,
"loss": 0.4272,
"step": 300
},
{
"epoch": 0.558810274898603,
"grad_norm": 31.637985229492188,
"learning_rate": 4.44043321299639e-05,
"loss": 0.448,
"step": 310
},
{
"epoch": 0.5768364127985579,
"grad_norm": 35.010459899902344,
"learning_rate": 4.422382671480145e-05,
"loss": 0.4215,
"step": 320
},
{
"epoch": 0.5948625506985128,
"grad_norm": 26.25156021118164,
"learning_rate": 4.404332129963899e-05,
"loss": 0.4396,
"step": 330
},
{
"epoch": 0.6128886885984678,
"grad_norm": 19.4619083404541,
"learning_rate": 4.386281588447654e-05,
"loss": 0.4378,
"step": 340
},
{
"epoch": 0.6309148264984227,
"grad_norm": 19.16460609436035,
"learning_rate": 4.368231046931408e-05,
"loss": 0.4366,
"step": 350
},
{
"epoch": 0.6489409643983777,
"grad_norm": 30.745384216308594,
"learning_rate": 4.350180505415163e-05,
"loss": 0.4062,
"step": 360
},
{
"epoch": 0.6669671022983326,
"grad_norm": 16.873245239257812,
"learning_rate": 4.332129963898917e-05,
"loss": 0.439,
"step": 370
},
{
"epoch": 0.6849932401982876,
"grad_norm": 25.771486282348633,
"learning_rate": 4.314079422382672e-05,
"loss": 0.4053,
"step": 380
},
{
"epoch": 0.7030193780982424,
"grad_norm": 14.48513412475586,
"learning_rate": 4.296028880866426e-05,
"loss": 0.4164,
"step": 390
},
{
"epoch": 0.7210455159981974,
"grad_norm": 34.993465423583984,
"learning_rate": 4.277978339350181e-05,
"loss": 0.4438,
"step": 400
},
{
"epoch": 0.7390716538981523,
"grad_norm": 22.576608657836914,
"learning_rate": 4.259927797833935e-05,
"loss": 0.4518,
"step": 410
},
{
"epoch": 0.7570977917981072,
"grad_norm": 19.088132858276367,
"learning_rate": 4.24187725631769e-05,
"loss": 0.4495,
"step": 420
},
{
"epoch": 0.7751239296980622,
"grad_norm": 35.2998046875,
"learning_rate": 4.223826714801444e-05,
"loss": 0.3897,
"step": 430
},
{
"epoch": 0.7931500675980171,
"grad_norm": 20.395572662353516,
"learning_rate": 4.205776173285199e-05,
"loss": 0.4818,
"step": 440
},
{
"epoch": 0.8111762054979721,
"grad_norm": 21.174386978149414,
"learning_rate": 4.187725631768953e-05,
"loss": 0.4103,
"step": 450
},
{
"epoch": 0.829202343397927,
"grad_norm": 32.97265625,
"learning_rate": 4.169675090252708e-05,
"loss": 0.4036,
"step": 460
},
{
"epoch": 0.847228481297882,
"grad_norm": 23.78690528869629,
"learning_rate": 4.151624548736462e-05,
"loss": 0.4079,
"step": 470
},
{
"epoch": 0.8652546191978369,
"grad_norm": 17.087247848510742,
"learning_rate": 4.1335740072202167e-05,
"loss": 0.4371,
"step": 480
},
{
"epoch": 0.8832807570977917,
"grad_norm": 13.810160636901855,
"learning_rate": 4.115523465703972e-05,
"loss": 0.4249,
"step": 490
},
{
"epoch": 0.9013068949977467,
"grad_norm": 20.197996139526367,
"learning_rate": 4.0974729241877256e-05,
"loss": 0.3991,
"step": 500
},
{
"epoch": 0.9193330328977016,
"grad_norm": 33.1153564453125,
"learning_rate": 4.079422382671481e-05,
"loss": 0.3994,
"step": 510
},
{
"epoch": 0.9373591707976566,
"grad_norm": 18.20081901550293,
"learning_rate": 4.0613718411552346e-05,
"loss": 0.4176,
"step": 520
},
{
"epoch": 0.9553853086976115,
"grad_norm": 20.09193992614746,
"learning_rate": 4.043321299638989e-05,
"loss": 0.4297,
"step": 530
},
{
"epoch": 0.9734114465975665,
"grad_norm": 15.939692497253418,
"learning_rate": 4.0252707581227436e-05,
"loss": 0.4309,
"step": 540
},
{
"epoch": 0.9914375844975214,
"grad_norm": 24.811601638793945,
"learning_rate": 4.007220216606498e-05,
"loss": 0.3908,
"step": 550
},
{
"epoch": 1.0,
"eval_f1": 0.8367177786987153,
"eval_loss": 0.37827184796333313,
"eval_precision": 0.7951949586451359,
"eval_recall": 0.8828159160472234,
"eval_runtime": 15.9582,
"eval_samples_per_second": 292.827,
"eval_steps_per_second": 36.658,
"step": 555
},
{
"epoch": 1.0090130689499774,
"grad_norm": 16.960424423217773,
"learning_rate": 3.989169675090253e-05,
"loss": 0.3719,
"step": 560
},
{
"epoch": 1.0270392068499323,
"grad_norm": 15.816173553466797,
"learning_rate": 3.971119133574007e-05,
"loss": 0.3238,
"step": 570
},
{
"epoch": 1.0450653447498874,
"grad_norm": 19.375043869018555,
"learning_rate": 3.953068592057762e-05,
"loss": 0.3516,
"step": 580
},
{
"epoch": 1.0630914826498423,
"grad_norm": 36.740943908691406,
"learning_rate": 3.935018050541516e-05,
"loss": 0.3329,
"step": 590
},
{
"epoch": 1.0811176205497972,
"grad_norm": 23.23931884765625,
"learning_rate": 3.916967509025271e-05,
"loss": 0.345,
"step": 600
},
{
"epoch": 1.099143758449752,
"grad_norm": 23.009553909301758,
"learning_rate": 3.898916967509025e-05,
"loss": 0.3054,
"step": 610
},
{
"epoch": 1.117169896349707,
"grad_norm": 18.865903854370117,
"learning_rate": 3.88086642599278e-05,
"loss": 0.3236,
"step": 620
},
{
"epoch": 1.135196034249662,
"grad_norm": 21.043642044067383,
"learning_rate": 3.862815884476535e-05,
"loss": 0.3092,
"step": 630
},
{
"epoch": 1.153222172149617,
"grad_norm": 22.50505256652832,
"learning_rate": 3.844765342960289e-05,
"loss": 0.3274,
"step": 640
},
{
"epoch": 1.1712483100495719,
"grad_norm": 20.4163818359375,
"learning_rate": 3.826714801444044e-05,
"loss": 0.2459,
"step": 650
},
{
"epoch": 1.1892744479495267,
"grad_norm": 21.210535049438477,
"learning_rate": 3.8086642599277976e-05,
"loss": 0.3365,
"step": 660
},
{
"epoch": 1.2073005858494819,
"grad_norm": 21.084487915039062,
"learning_rate": 3.790613718411553e-05,
"loss": 0.3242,
"step": 670
},
{
"epoch": 1.2253267237494367,
"grad_norm": 22.721454620361328,
"learning_rate": 3.7725631768953066e-05,
"loss": 0.2844,
"step": 680
},
{
"epoch": 1.2433528616493916,
"grad_norm": 49.28988265991211,
"learning_rate": 3.754512635379062e-05,
"loss": 0.3247,
"step": 690
},
{
"epoch": 1.2613789995493465,
"grad_norm": 48.35593795776367,
"learning_rate": 3.7364620938628155e-05,
"loss": 0.3217,
"step": 700
},
{
"epoch": 1.2794051374493014,
"grad_norm": 30.997228622436523,
"learning_rate": 3.718411552346571e-05,
"loss": 0.2854,
"step": 710
},
{
"epoch": 1.2974312753492563,
"grad_norm": 35.57414627075195,
"learning_rate": 3.700361010830325e-05,
"loss": 0.3315,
"step": 720
},
{
"epoch": 1.3154574132492114,
"grad_norm": 27.17759895324707,
"learning_rate": 3.68231046931408e-05,
"loss": 0.2965,
"step": 730
},
{
"epoch": 1.3334835511491663,
"grad_norm": 31.296627044677734,
"learning_rate": 3.664259927797834e-05,
"loss": 0.3396,
"step": 740
},
{
"epoch": 1.3515096890491212,
"grad_norm": 18.639175415039062,
"learning_rate": 3.646209386281589e-05,
"loss": 0.3178,
"step": 750
},
{
"epoch": 1.3695358269490763,
"grad_norm": 31.972665786743164,
"learning_rate": 3.628158844765343e-05,
"loss": 0.2927,
"step": 760
},
{
"epoch": 1.3875619648490312,
"grad_norm": 14.907872200012207,
"learning_rate": 3.610108303249098e-05,
"loss": 0.3391,
"step": 770
},
{
"epoch": 1.405588102748986,
"grad_norm": 33.58858871459961,
"learning_rate": 3.592057761732852e-05,
"loss": 0.3111,
"step": 780
},
{
"epoch": 1.423614240648941,
"grad_norm": 17.019819259643555,
"learning_rate": 3.574007220216607e-05,
"loss": 0.3401,
"step": 790
},
{
"epoch": 1.4416403785488958,
"grad_norm": 27.324296951293945,
"learning_rate": 3.555956678700361e-05,
"loss": 0.2917,
"step": 800
},
{
"epoch": 1.4596665164488507,
"grad_norm": 30.170120239257812,
"learning_rate": 3.537906137184116e-05,
"loss": 0.2871,
"step": 810
},
{
"epoch": 1.4776926543488058,
"grad_norm": 44.26614761352539,
"learning_rate": 3.51985559566787e-05,
"loss": 0.3307,
"step": 820
},
{
"epoch": 1.4957187922487607,
"grad_norm": 28.978620529174805,
"learning_rate": 3.5018050541516247e-05,
"loss": 0.3207,
"step": 830
},
{
"epoch": 1.5137449301487156,
"grad_norm": 22.961040496826172,
"learning_rate": 3.483754512635379e-05,
"loss": 0.3075,
"step": 840
},
{
"epoch": 1.5317710680486707,
"grad_norm": 22.511327743530273,
"learning_rate": 3.4657039711191336e-05,
"loss": 0.3035,
"step": 850
},
{
"epoch": 1.5497972059486256,
"grad_norm": 33.944190979003906,
"learning_rate": 3.447653429602888e-05,
"loss": 0.3243,
"step": 860
},
{
"epoch": 1.5678233438485805,
"grad_norm": 24.72734832763672,
"learning_rate": 3.4296028880866426e-05,
"loss": 0.3403,
"step": 870
},
{
"epoch": 1.5858494817485354,
"grad_norm": 31.211387634277344,
"learning_rate": 3.411552346570397e-05,
"loss": 0.3142,
"step": 880
},
{
"epoch": 1.6038756196484902,
"grad_norm": 23.974918365478516,
"learning_rate": 3.3935018050541516e-05,
"loss": 0.3015,
"step": 890
},
{
"epoch": 1.6219017575484451,
"grad_norm": 30.705829620361328,
"learning_rate": 3.375451263537907e-05,
"loss": 0.3529,
"step": 900
},
{
"epoch": 1.6399278954484002,
"grad_norm": 20.697528839111328,
"learning_rate": 3.3574007220216606e-05,
"loss": 0.302,
"step": 910
},
{
"epoch": 1.6579540333483551,
"grad_norm": 28.308101654052734,
"learning_rate": 3.339350180505416e-05,
"loss": 0.2735,
"step": 920
},
{
"epoch": 1.67598017124831,
"grad_norm": 22.28900909423828,
"learning_rate": 3.3212996389891696e-05,
"loss": 0.3194,
"step": 930
},
{
"epoch": 1.694006309148265,
"grad_norm": 29.04107666015625,
"learning_rate": 3.303249097472924e-05,
"loss": 0.3265,
"step": 940
},
{
"epoch": 1.71203244704822,
"grad_norm": 40.08032989501953,
"learning_rate": 3.2851985559566786e-05,
"loss": 0.3331,
"step": 950
},
{
"epoch": 1.7300585849481749,
"grad_norm": 29.40334129333496,
"learning_rate": 3.267148014440433e-05,
"loss": 0.2664,
"step": 960
},
{
"epoch": 1.7480847228481298,
"grad_norm": 33.51088333129883,
"learning_rate": 3.249097472924188e-05,
"loss": 0.3031,
"step": 970
},
{
"epoch": 1.7661108607480847,
"grad_norm": 27.003469467163086,
"learning_rate": 3.231046931407942e-05,
"loss": 0.3195,
"step": 980
},
{
"epoch": 1.7841369986480395,
"grad_norm": 26.585309982299805,
"learning_rate": 3.212996389891697e-05,
"loss": 0.3385,
"step": 990
},
{
"epoch": 1.8021631365479944,
"grad_norm": 36.866294860839844,
"learning_rate": 3.194945848375451e-05,
"loss": 0.3029,
"step": 1000
},
{
"epoch": 1.8201892744479495,
"grad_norm": 44.910362243652344,
"learning_rate": 3.176895306859206e-05,
"loss": 0.3239,
"step": 1010
},
{
"epoch": 1.8382154123479044,
"grad_norm": 20.13945198059082,
"learning_rate": 3.15884476534296e-05,
"loss": 0.3071,
"step": 1020
},
{
"epoch": 1.8562415502478595,
"grad_norm": 42.44215774536133,
"learning_rate": 3.140794223826715e-05,
"loss": 0.3246,
"step": 1030
},
{
"epoch": 1.8742676881478144,
"grad_norm": 31.791410446166992,
"learning_rate": 3.12274368231047e-05,
"loss": 0.3273,
"step": 1040
},
{
"epoch": 1.8922938260477693,
"grad_norm": 21.244722366333008,
"learning_rate": 3.104693140794224e-05,
"loss": 0.3093,
"step": 1050
},
{
"epoch": 1.9103199639477242,
"grad_norm": 27.103349685668945,
"learning_rate": 3.086642599277979e-05,
"loss": 0.2694,
"step": 1060
},
{
"epoch": 1.928346101847679,
"grad_norm": 35.73145294189453,
"learning_rate": 3.0685920577617325e-05,
"loss": 0.2934,
"step": 1070
},
{
"epoch": 1.946372239747634,
"grad_norm": 19.97008514404297,
"learning_rate": 3.0505415162454877e-05,
"loss": 0.3493,
"step": 1080
},
{
"epoch": 1.9643983776475888,
"grad_norm": 23.084754943847656,
"learning_rate": 3.032490974729242e-05,
"loss": 0.3243,
"step": 1090
},
{
"epoch": 1.982424515547544,
"grad_norm": 29.515155792236328,
"learning_rate": 3.0144404332129967e-05,
"loss": 0.3235,
"step": 1100
},
{
"epoch": 2.0,
"grad_norm": 24.17915916442871,
"learning_rate": 2.996389891696751e-05,
"loss": 0.2751,
"step": 1110
},
{
"epoch": 2.0,
"eval_f1": 0.8428896708755951,
"eval_loss": 0.3940623104572296,
"eval_precision": 0.800314465408805,
"eval_recall": 0.890249234805422,
"eval_runtime": 12.3989,
"eval_samples_per_second": 376.889,
"eval_steps_per_second": 47.182,
"step": 1110
},
{
"epoch": 2.018026137899955,
"grad_norm": 18.65660858154297,
"learning_rate": 2.9783393501805057e-05,
"loss": 0.1912,
"step": 1120
},
{
"epoch": 2.0360522757999098,
"grad_norm": 15.253448486328125,
"learning_rate": 2.9602888086642598e-05,
"loss": 0.1425,
"step": 1130
},
{
"epoch": 2.0540784136998647,
"grad_norm": 58.709293365478516,
"learning_rate": 2.9422382671480147e-05,
"loss": 0.1945,
"step": 1140
},
{
"epoch": 2.0721045515998195,
"grad_norm": 45.032073974609375,
"learning_rate": 2.924187725631769e-05,
"loss": 0.1477,
"step": 1150
},
{
"epoch": 2.090130689499775,
"grad_norm": 26.54606819152832,
"learning_rate": 2.906137184115524e-05,
"loss": 0.1731,
"step": 1160
},
{
"epoch": 2.1081568273997298,
"grad_norm": 24.949045181274414,
"learning_rate": 2.888086642599278e-05,
"loss": 0.1504,
"step": 1170
},
{
"epoch": 2.1261829652996846,
"grad_norm": 71.09517669677734,
"learning_rate": 2.870036101083033e-05,
"loss": 0.149,
"step": 1180
},
{
"epoch": 2.1442091031996395,
"grad_norm": 43.31296157836914,
"learning_rate": 2.851985559566787e-05,
"loss": 0.1826,
"step": 1190
},
{
"epoch": 2.1622352410995944,
"grad_norm": 23.145519256591797,
"learning_rate": 2.8339350180505413e-05,
"loss": 0.1716,
"step": 1200
},
{
"epoch": 2.1802613789995493,
"grad_norm": 41.57735061645508,
"learning_rate": 2.815884476534296e-05,
"loss": 0.1784,
"step": 1210
},
{
"epoch": 2.198287516899504,
"grad_norm": 51.243560791015625,
"learning_rate": 2.7978339350180506e-05,
"loss": 0.161,
"step": 1220
},
{
"epoch": 2.216313654799459,
"grad_norm": 38.89030838012695,
"learning_rate": 2.779783393501805e-05,
"loss": 0.1688,
"step": 1230
},
{
"epoch": 2.234339792699414,
"grad_norm": 43.14015197753906,
"learning_rate": 2.7617328519855596e-05,
"loss": 0.1916,
"step": 1240
},
{
"epoch": 2.2523659305993693,
"grad_norm": 35.20539474487305,
"learning_rate": 2.7436823104693144e-05,
"loss": 0.1652,
"step": 1250
},
{
"epoch": 2.270392068499324,
"grad_norm": 39.17034912109375,
"learning_rate": 2.7256317689530686e-05,
"loss": 0.1888,
"step": 1260
},
{
"epoch": 2.288418206399279,
"grad_norm": 26.350337982177734,
"learning_rate": 2.7075812274368234e-05,
"loss": 0.1585,
"step": 1270
},
{
"epoch": 2.306444344299234,
"grad_norm": 33.05204772949219,
"learning_rate": 2.6895306859205776e-05,
"loss": 0.1449,
"step": 1280
},
{
"epoch": 2.324470482199189,
"grad_norm": 53.03055191040039,
"learning_rate": 2.6714801444043324e-05,
"loss": 0.1457,
"step": 1290
},
{
"epoch": 2.3424966200991437,
"grad_norm": 13.711820602416992,
"learning_rate": 2.6534296028880866e-05,
"loss": 0.1451,
"step": 1300
},
{
"epoch": 2.3605227579990986,
"grad_norm": 60.76357650756836,
"learning_rate": 2.6353790613718414e-05,
"loss": 0.1803,
"step": 1310
},
{
"epoch": 2.3785488958990535,
"grad_norm": 47.53473663330078,
"learning_rate": 2.617328519855596e-05,
"loss": 0.1405,
"step": 1320
},
{
"epoch": 2.3965750337990084,
"grad_norm": 46.01548385620117,
"learning_rate": 2.59927797833935e-05,
"loss": 0.1701,
"step": 1330
},
{
"epoch": 2.4146011716989637,
"grad_norm": 36.47391128540039,
"learning_rate": 2.581227436823105e-05,
"loss": 0.1362,
"step": 1340
},
{
"epoch": 2.4326273095989186,
"grad_norm": 69.90142059326172,
"learning_rate": 2.563176895306859e-05,
"loss": 0.143,
"step": 1350
},
{
"epoch": 2.4506534474988735,
"grad_norm": 52.88728713989258,
"learning_rate": 2.545126353790614e-05,
"loss": 0.1776,
"step": 1360
},
{
"epoch": 2.4686795853988284,
"grad_norm": 26.982271194458008,
"learning_rate": 2.527075812274368e-05,
"loss": 0.1922,
"step": 1370
},
{
"epoch": 2.4867057232987833,
"grad_norm": 41.308414459228516,
"learning_rate": 2.509025270758123e-05,
"loss": 0.1699,
"step": 1380
},
{
"epoch": 2.504731861198738,
"grad_norm": 68.30561828613281,
"learning_rate": 2.4909747292418774e-05,
"loss": 0.1503,
"step": 1390
},
{
"epoch": 2.522757999098693,
"grad_norm": 32.563026428222656,
"learning_rate": 2.472924187725632e-05,
"loss": 0.2153,
"step": 1400
},
{
"epoch": 2.540784136998648,
"grad_norm": 24.238431930541992,
"learning_rate": 2.4548736462093864e-05,
"loss": 0.1935,
"step": 1410
},
{
"epoch": 2.558810274898603,
"grad_norm": 52.790916442871094,
"learning_rate": 2.436823104693141e-05,
"loss": 0.21,
"step": 1420
},
{
"epoch": 2.576836412798558,
"grad_norm": 39.62615966796875,
"learning_rate": 2.4187725631768953e-05,
"loss": 0.1756,
"step": 1430
},
{
"epoch": 2.5948625506985126,
"grad_norm": 28.220199584960938,
"learning_rate": 2.40072202166065e-05,
"loss": 0.1704,
"step": 1440
},
{
"epoch": 2.612888688598468,
"grad_norm": 36.99165344238281,
"learning_rate": 2.3826714801444043e-05,
"loss": 0.1632,
"step": 1450
},
{
"epoch": 2.630914826498423,
"grad_norm": 79.26138305664062,
"learning_rate": 2.3646209386281588e-05,
"loss": 0.1562,
"step": 1460
},
{
"epoch": 2.6489409643983777,
"grad_norm": 47.28179931640625,
"learning_rate": 2.3465703971119137e-05,
"loss": 0.2061,
"step": 1470
},
{
"epoch": 2.6669671022983326,
"grad_norm": 40.676551818847656,
"learning_rate": 2.328519855595668e-05,
"loss": 0.1955,
"step": 1480
},
{
"epoch": 2.6849932401982874,
"grad_norm": 29.851699829101562,
"learning_rate": 2.3104693140794227e-05,
"loss": 0.1447,
"step": 1490
},
{
"epoch": 2.7030193780982423,
"grad_norm": 46.22013473510742,
"learning_rate": 2.292418772563177e-05,
"loss": 0.1635,
"step": 1500
},
{
"epoch": 2.721045515998197,
"grad_norm": 35.77067565917969,
"learning_rate": 2.2743682310469316e-05,
"loss": 0.1611,
"step": 1510
},
{
"epoch": 2.7390716538981525,
"grad_norm": 44.474876403808594,
"learning_rate": 2.2563176895306858e-05,
"loss": 0.159,
"step": 1520
},
{
"epoch": 2.757097791798107,
"grad_norm": 49.898887634277344,
"learning_rate": 2.2382671480144403e-05,
"loss": 0.1716,
"step": 1530
},
{
"epoch": 2.7751239296980623,
"grad_norm": 54.783653259277344,
"learning_rate": 2.220216606498195e-05,
"loss": 0.1921,
"step": 1540
},
{
"epoch": 2.793150067598017,
"grad_norm": 14.532942771911621,
"learning_rate": 2.2021660649819496e-05,
"loss": 0.1436,
"step": 1550
},
{
"epoch": 2.811176205497972,
"grad_norm": 47.2420768737793,
"learning_rate": 2.184115523465704e-05,
"loss": 0.2074,
"step": 1560
},
{
"epoch": 2.829202343397927,
"grad_norm": 39.21223449707031,
"learning_rate": 2.1660649819494586e-05,
"loss": 0.1871,
"step": 1570
},
{
"epoch": 2.847228481297882,
"grad_norm": 43.29368591308594,
"learning_rate": 2.148014440433213e-05,
"loss": 0.1474,
"step": 1580
},
{
"epoch": 2.8652546191978367,
"grad_norm": 30.276391983032227,
"learning_rate": 2.1299638989169676e-05,
"loss": 0.2214,
"step": 1590
},
{
"epoch": 2.8832807570977916,
"grad_norm": 38.97634506225586,
"learning_rate": 2.111913357400722e-05,
"loss": 0.1692,
"step": 1600
},
{
"epoch": 2.901306894997747,
"grad_norm": 46.147945404052734,
"learning_rate": 2.0938628158844766e-05,
"loss": 0.1587,
"step": 1610
},
{
"epoch": 2.9193330328977014,
"grad_norm": 57.70730209350586,
"learning_rate": 2.075812274368231e-05,
"loss": 0.1642,
"step": 1620
},
{
"epoch": 2.9373591707976567,
"grad_norm": 48.976478576660156,
"learning_rate": 2.057761732851986e-05,
"loss": 0.1536,
"step": 1630
},
{
"epoch": 2.9553853086976116,
"grad_norm": 49.945587158203125,
"learning_rate": 2.0397111913357404e-05,
"loss": 0.1612,
"step": 1640
},
{
"epoch": 2.9734114465975665,
"grad_norm": 74.91495513916016,
"learning_rate": 2.0216606498194946e-05,
"loss": 0.1557,
"step": 1650
},
{
"epoch": 2.9914375844975214,
"grad_norm": 30.24640464782715,
"learning_rate": 2.003610108303249e-05,
"loss": 0.1631,
"step": 1660
},
{
"epoch": 3.0,
"eval_f1": 0.8290598290598291,
"eval_loss": 0.5290467739105225,
"eval_precision": 0.8106978687839532,
"eval_recall": 0.8482728465238304,
"eval_runtime": 11.4074,
"eval_samples_per_second": 409.646,
"eval_steps_per_second": 51.282,
"step": 1665
}
],
"logging_steps": 10,
"max_steps": 2770,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.401224236425216e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}