Babel-9B / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.006,
"eval_steps": 100,
"global_step": 300,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 2e-05,
"grad_norm": 1.8784615993499756,
"learning_rate": 4e-10,
"loss": 1.247075080871582,
"memory(GiB)": 45.58,
"step": 1,
"token_acc": 0.7174163783160323,
"train_speed(iter/s)": 0.013609
},
{
"epoch": 0.0001,
"grad_norm": 1.7829604148864746,
"learning_rate": 2e-09,
"loss": 1.2329106330871582,
"memory(GiB)": 47.68,
"step": 5,
"token_acc": 0.7190702324418895,
"train_speed(iter/s)": 0.056616
},
{
"epoch": 0.0002,
"grad_norm": 1.8510538339614868,
"learning_rate": 4e-09,
"loss": 1.2224847793579101,
"memory(GiB)": 47.68,
"step": 10,
"token_acc": 0.7056490003173596,
"train_speed(iter/s)": 0.093003
},
{
"epoch": 0.0003,
"grad_norm": 1.9363449811935425,
"learning_rate": 5.6e-09,
"loss": 1.2302563667297364,
"memory(GiB)": 48.7,
"step": 15,
"token_acc": 0.7107342772472836,
"train_speed(iter/s)": 0.119381
},
{
"epoch": 0.0004,
"grad_norm": 1.935133695602417,
"learning_rate": 7.6e-09,
"loss": 1.2335556983947753,
"memory(GiB)": 48.7,
"step": 20,
"token_acc": 0.7361481714016638,
"train_speed(iter/s)": 0.137465
},
{
"epoch": 0.0005,
"grad_norm": 1.7830060720443726,
"learning_rate": 9.599999999999998e-09,
"loss": 1.2144426345825194,
"memory(GiB)": 48.7,
"step": 25,
"token_acc": 0.7571428571428571,
"train_speed(iter/s)": 0.150572
},
{
"epoch": 0.0006,
"grad_norm": 1.7309236526489258,
"learning_rate": 1.1599999999999998e-08,
"loss": 1.2357244491577148,
"memory(GiB)": 48.7,
"step": 30,
"token_acc": 0.7153189910979229,
"train_speed(iter/s)": 0.162512
},
{
"epoch": 0.0007,
"grad_norm": 1.7474730014801025,
"learning_rate": 1.36e-08,
"loss": 1.231928825378418,
"memory(GiB)": 48.7,
"step": 35,
"token_acc": 0.7078455355582737,
"train_speed(iter/s)": 0.171663
},
{
"epoch": 0.0008,
"grad_norm": 1.780007004737854,
"learning_rate": 1.5599999999999997e-08,
"loss": 1.2193016052246093,
"memory(GiB)": 48.7,
"step": 40,
"token_acc": 0.6932599724896836,
"train_speed(iter/s)": 0.178935
},
{
"epoch": 0.0009,
"grad_norm": 1.723268747329712,
"learning_rate": 1.72e-08,
"loss": 1.204256820678711,
"memory(GiB)": 48.7,
"step": 45,
"token_acc": 0.6967172137977004,
"train_speed(iter/s)": 0.186547
},
{
"epoch": 0.001,
"grad_norm": 1.7750556468963623,
"learning_rate": 1.9199999999999997e-08,
"loss": 1.2007877349853515,
"memory(GiB)": 48.7,
"step": 50,
"token_acc": 0.6921988682295878,
"train_speed(iter/s)": 0.19125
},
{
"epoch": 0.0011,
"grad_norm": 1.299729585647583,
"learning_rate": 2.1199999999999998e-08,
"loss": 0.8069381713867188,
"memory(GiB)": 48.7,
"step": 55,
"token_acc": 0.7952855847688123,
"train_speed(iter/s)": 0.195908
},
{
"epoch": 0.0012,
"grad_norm": 1.3525291681289673,
"learning_rate": 2.3199999999999996e-08,
"loss": 0.42714319229125974,
"memory(GiB)": 48.7,
"step": 60,
"token_acc": 0.8841448189762796,
"train_speed(iter/s)": 0.201
},
{
"epoch": 0.0013,
"grad_norm": 1.080306053161621,
"learning_rate": 2.52e-08,
"loss": 0.4192944526672363,
"memory(GiB)": 48.7,
"step": 65,
"token_acc": 0.8929637526652452,
"train_speed(iter/s)": 0.204749
},
{
"epoch": 0.0014,
"grad_norm": 1.177667498588562,
"learning_rate": 2.72e-08,
"loss": 0.4133622169494629,
"memory(GiB)": 48.7,
"step": 70,
"token_acc": 0.8836150845253576,
"train_speed(iter/s)": 0.20853
},
{
"epoch": 0.0015,
"grad_norm": 1.4200434684753418,
"learning_rate": 2.92e-08,
"loss": 0.4155101776123047,
"memory(GiB)": 48.7,
"step": 75,
"token_acc": 0.8933333333333333,
"train_speed(iter/s)": 0.211872
},
{
"epoch": 0.0016,
"grad_norm": 1.1931238174438477,
"learning_rate": 3.1199999999999995e-08,
"loss": 0.40213637351989745,
"memory(GiB)": 48.7,
"step": 80,
"token_acc": 0.9073569482288828,
"train_speed(iter/s)": 0.214198
},
{
"epoch": 0.0017,
"grad_norm": 1.2268942594528198,
"learning_rate": 3.32e-08,
"loss": 0.4193448543548584,
"memory(GiB)": 48.7,
"step": 85,
"token_acc": 0.890360895386021,
"train_speed(iter/s)": 0.217149
},
{
"epoch": 0.0018,
"grad_norm": 1.168769121170044,
"learning_rate": 3.52e-08,
"loss": 0.4069235324859619,
"memory(GiB)": 48.7,
"step": 90,
"token_acc": 0.8803038239916187,
"train_speed(iter/s)": 0.219373
},
{
"epoch": 0.0019,
"grad_norm": 1.1890403032302856,
"learning_rate": 3.7199999999999996e-08,
"loss": 0.4017478942871094,
"memory(GiB)": 48.7,
"step": 95,
"token_acc": 0.8852933438568797,
"train_speed(iter/s)": 0.221331
},
{
"epoch": 0.002,
"grad_norm": 1.0330846309661865,
"learning_rate": 3.9199999999999994e-08,
"loss": 0.3853747844696045,
"memory(GiB)": 48.7,
"step": 100,
"token_acc": 0.8942406230784997,
"train_speed(iter/s)": 0.223547
},
{
"epoch": 0.0021,
"grad_norm": 0.8882772922515869,
"learning_rate": 4.12e-08,
"loss": 0.38479089736938477,
"memory(GiB)": 48.7,
"step": 105,
"token_acc": 0.888404744613895,
"train_speed(iter/s)": 0.165159
},
{
"epoch": 0.0022,
"grad_norm": 0.9768219590187073,
"learning_rate": 4.32e-08,
"loss": 0.3818492889404297,
"memory(GiB)": 48.7,
"step": 110,
"token_acc": 0.8976019604410993,
"train_speed(iter/s)": 0.168006
},
{
"epoch": 0.0023,
"grad_norm": 1.0652796030044556,
"learning_rate": 4.5199999999999994e-08,
"loss": 0.37880630493164064,
"memory(GiB)": 48.7,
"step": 115,
"token_acc": 0.8944169293111212,
"train_speed(iter/s)": 0.170961
},
{
"epoch": 0.0024,
"grad_norm": 0.9667234420776367,
"learning_rate": 4.72e-08,
"loss": 0.3773456573486328,
"memory(GiB)": 48.7,
"step": 120,
"token_acc": 0.9041720990873533,
"train_speed(iter/s)": 0.17359
},
{
"epoch": 0.0025,
"grad_norm": 1.1004765033721924,
"learning_rate": 4.92e-08,
"loss": 0.37454307079315186,
"memory(GiB)": 48.7,
"step": 125,
"token_acc": 0.8971794358871774,
"train_speed(iter/s)": 0.17613
},
{
"epoch": 0.0026,
"grad_norm": 0.9110294580459595,
"learning_rate": 5.12e-08,
"loss": 0.35935871601104735,
"memory(GiB)": 48.7,
"step": 130,
"token_acc": 0.9002339927674963,
"train_speed(iter/s)": 0.178791
},
{
"epoch": 0.0027,
"grad_norm": 0.8307713270187378,
"learning_rate": 5.319999999999999e-08,
"loss": 0.3583024501800537,
"memory(GiB)": 48.7,
"step": 135,
"token_acc": 0.9001166861143524,
"train_speed(iter/s)": 0.181064
},
{
"epoch": 0.0028,
"grad_norm": 0.8742683529853821,
"learning_rate": 5.52e-08,
"loss": 0.34030709266662595,
"memory(GiB)": 48.7,
"step": 140,
"token_acc": 0.8936170212765957,
"train_speed(iter/s)": 0.183337
},
{
"epoch": 0.0029,
"grad_norm": 0.8469884395599365,
"learning_rate": 5.7199999999999996e-08,
"loss": 0.3286393404006958,
"memory(GiB)": 48.7,
"step": 145,
"token_acc": 0.9112193294624461,
"train_speed(iter/s)": 0.185295
},
{
"epoch": 0.003,
"grad_norm": 0.7903546094894409,
"learning_rate": 5.92e-08,
"loss": 0.34013702869415285,
"memory(GiB)": 48.7,
"step": 150,
"token_acc": 0.9123177283192633,
"train_speed(iter/s)": 0.187277
},
{
"epoch": 0.0031,
"grad_norm": 0.7647843956947327,
"learning_rate": 6.119999999999999e-08,
"loss": 0.32967448234558105,
"memory(GiB)": 48.7,
"step": 155,
"token_acc": 0.9061872909698997,
"train_speed(iter/s)": 0.189242
},
{
"epoch": 0.0032,
"grad_norm": 0.7588908076286316,
"learning_rate": 6.32e-08,
"loss": 0.31885499954223634,
"memory(GiB)": 48.7,
"step": 160,
"token_acc": 0.9148387096774193,
"train_speed(iter/s)": 0.190988
},
{
"epoch": 0.0033,
"grad_norm": 0.7278595566749573,
"learning_rate": 6.519999999999999e-08,
"loss": 0.3225527048110962,
"memory(GiB)": 48.7,
"step": 165,
"token_acc": 0.9134559535333979,
"train_speed(iter/s)": 0.192579
},
{
"epoch": 0.0034,
"grad_norm": 0.7548233270645142,
"learning_rate": 6.719999999999999e-08,
"loss": 0.3111454725265503,
"memory(GiB)": 48.7,
"step": 170,
"token_acc": 0.9035423647678315,
"train_speed(iter/s)": 0.194372
},
{
"epoch": 0.0035,
"grad_norm": 0.7382190823554993,
"learning_rate": 6.92e-08,
"loss": 0.30791687965393066,
"memory(GiB)": 48.7,
"step": 175,
"token_acc": 0.9182072829131652,
"train_speed(iter/s)": 0.195886
},
{
"epoch": 0.0036,
"grad_norm": 0.6201812028884888,
"learning_rate": 7.12e-08,
"loss": 0.30755660533905027,
"memory(GiB)": 48.7,
"step": 180,
"token_acc": 0.9121319199057715,
"train_speed(iter/s)": 0.197365
},
{
"epoch": 0.0037,
"grad_norm": 0.680054247379303,
"learning_rate": 7.32e-08,
"loss": 0.3023200511932373,
"memory(GiB)": 48.7,
"step": 185,
"token_acc": 0.9189243427654024,
"train_speed(iter/s)": 0.198991
},
{
"epoch": 0.0038,
"grad_norm": 0.7230331301689148,
"learning_rate": 7.52e-08,
"loss": 0.29792306423187254,
"memory(GiB)": 48.7,
"step": 190,
"token_acc": 0.9166977960403436,
"train_speed(iter/s)": 0.200242
},
{
"epoch": 0.0039,
"grad_norm": 0.6341392397880554,
"learning_rate": 7.72e-08,
"loss": 0.2936398983001709,
"memory(GiB)": 48.7,
"step": 195,
"token_acc": 0.916235294117647,
"train_speed(iter/s)": 0.20159
},
{
"epoch": 0.004,
"grad_norm": 0.6513913869857788,
"learning_rate": 7.920000000000001e-08,
"loss": 0.29084038734436035,
"memory(GiB)": 48.7,
"step": 200,
"token_acc": 0.915651358950328,
"train_speed(iter/s)": 0.202853
},
{
"epoch": 0.0041,
"grad_norm": 0.5849307775497437,
"learning_rate": 8.119999999999999e-08,
"loss": 0.28313345909118653,
"memory(GiB)": 48.7,
"step": 205,
"token_acc": 0.9201498422712934,
"train_speed(iter/s)": 0.174461
},
{
"epoch": 0.0042,
"grad_norm": 0.5864230990409851,
"learning_rate": 8.319999999999999e-08,
"loss": 0.27917160987854006,
"memory(GiB)": 48.7,
"step": 210,
"token_acc": 0.9227850926132559,
"train_speed(iter/s)": 0.176086
},
{
"epoch": 0.0043,
"grad_norm": 0.5705774426460266,
"learning_rate": 8.52e-08,
"loss": 0.27348690032958983,
"memory(GiB)": 48.7,
"step": 215,
"token_acc": 0.9209313047487321,
"train_speed(iter/s)": 0.177517
},
{
"epoch": 0.0044,
"grad_norm": 0.5566577911376953,
"learning_rate": 8.72e-08,
"loss": 0.2738009214401245,
"memory(GiB)": 48.7,
"step": 220,
"token_acc": 0.9212003454231433,
"train_speed(iter/s)": 0.178985
},
{
"epoch": 0.0045,
"grad_norm": 0.5548610091209412,
"learning_rate": 8.919999999999999e-08,
"loss": 0.270387601852417,
"memory(GiB)": 48.7,
"step": 225,
"token_acc": 0.9163515016685205,
"train_speed(iter/s)": 0.180404
},
{
"epoch": 0.0046,
"grad_norm": 0.5132172107696533,
"learning_rate": 9.12e-08,
"loss": 0.2689182758331299,
"memory(GiB)": 48.7,
"step": 230,
"token_acc": 0.9161837748344371,
"train_speed(iter/s)": 0.181679
},
{
"epoch": 0.0047,
"grad_norm": 0.5392292141914368,
"learning_rate": 9.32e-08,
"loss": 0.2692979335784912,
"memory(GiB)": 48.7,
"step": 235,
"token_acc": 0.9189686924493554,
"train_speed(iter/s)": 0.18288
},
{
"epoch": 0.0048,
"grad_norm": 0.5800856947898865,
"learning_rate": 9.52e-08,
"loss": 0.2671097755432129,
"memory(GiB)": 48.7,
"step": 240,
"token_acc": 0.9276693455797933,
"train_speed(iter/s)": 0.184197
},
{
"epoch": 0.0049,
"grad_norm": 0.5248638391494751,
"learning_rate": 9.72e-08,
"loss": 0.2651660442352295,
"memory(GiB)": 48.7,
"step": 245,
"token_acc": 0.9340950826361967,
"train_speed(iter/s)": 0.185361
},
{
"epoch": 0.005,
"grad_norm": 0.4779960811138153,
"learning_rate": 9.919999999999999e-08,
"loss": 0.2598712921142578,
"memory(GiB)": 48.7,
"step": 250,
"token_acc": 0.9171786120591582,
"train_speed(iter/s)": 0.186527
},
{
"epoch": 0.0051,
"grad_norm": 0.5446517467498779,
"learning_rate": 1.0119999999999999e-07,
"loss": 0.2589933633804321,
"memory(GiB)": 48.7,
"step": 255,
"token_acc": 0.919714165615805,
"train_speed(iter/s)": 0.187704
},
{
"epoch": 0.0052,
"grad_norm": 0.5014457702636719,
"learning_rate": 1.032e-07,
"loss": 0.2622577667236328,
"memory(GiB)": 48.7,
"step": 260,
"token_acc": 0.9149246475449684,
"train_speed(iter/s)": 0.188846
},
{
"epoch": 0.0053,
"grad_norm": 0.486289918422699,
"learning_rate": 1.052e-07,
"loss": 0.2525398969650269,
"memory(GiB)": 48.7,
"step": 265,
"token_acc": 0.9272396543883583,
"train_speed(iter/s)": 0.189871
},
{
"epoch": 0.0054,
"grad_norm": 0.5184631943702698,
"learning_rate": 1.072e-07,
"loss": 0.2508587121963501,
"memory(GiB)": 48.7,
"step": 270,
"token_acc": 0.9343525179856115,
"train_speed(iter/s)": 0.190895
},
{
"epoch": 0.0055,
"grad_norm": 0.7472315430641174,
"learning_rate": 1.092e-07,
"loss": 0.3759446620941162,
"memory(GiB)": 48.7,
"step": 275,
"token_acc": 0.8695306284805091,
"train_speed(iter/s)": 0.191441
},
{
"epoch": 0.0056,
"grad_norm": 0.6762179136276245,
"learning_rate": 1.1119999999999999e-07,
"loss": 0.5864349365234375,
"memory(GiB)": 48.7,
"step": 280,
"token_acc": 0.8324331623008372,
"train_speed(iter/s)": 0.191626
},
{
"epoch": 0.0057,
"grad_norm": 0.5527864694595337,
"learning_rate": 1.132e-07,
"loss": 0.5735197067260742,
"memory(GiB)": 48.7,
"step": 285,
"token_acc": 0.8262280306444344,
"train_speed(iter/s)": 0.19159
},
{
"epoch": 0.0058,
"grad_norm": 0.46731290221214294,
"learning_rate": 1.1519999999999999e-07,
"loss": 0.5600314140319824,
"memory(GiB)": 48.7,
"step": 290,
"token_acc": 0.8467968392868804,
"train_speed(iter/s)": 0.191504
},
{
"epoch": 0.0059,
"grad_norm": 0.42628228664398193,
"learning_rate": 1.1719999999999999e-07,
"loss": 0.5390614986419677,
"memory(GiB)": 50.81,
"step": 295,
"token_acc": 0.8327679356703104,
"train_speed(iter/s)": 0.191542
},
{
"epoch": 0.006,
"grad_norm": 0.3921428322792053,
"learning_rate": 1.192e-07,
"loss": 0.5223519325256347,
"memory(GiB)": 50.81,
"step": 300,
"token_acc": 0.8428060564559081,
"train_speed(iter/s)": 0.191529
}
],
"logging_steps": 5,
"max_steps": 50000,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.573770266594509e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
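
For reference, the log entries above can be inspected programmatically. Below is a minimal sketch using only the Python standard library; it assumes the file has been downloaded locally as trainer_state.json (the path is an assumption, adjust as needed) and relies only on keys that actually appear in this file (log_history, step, loss, token_acc, learning_rate, global_step, max_steps).

import json

# Load the trainer state dump (path is an assumption; point it at the
# downloaded trainer_state.json).
with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# log_history holds one record per logging interval (logging_steps = 5 here),
# each with loss, token_acc, learning_rate, grad_norm, and so on.
history = state["log_history"]

print(f"logged records: {len(history)}")
print(f"global_step:    {state['global_step']} / {state['max_steps']}")

# Compact table of step, training loss, token accuracy, and learning rate.
print(f"{'step':>6} {'loss':>10} {'token_acc':>10} {'lr':>12}")
for rec in history:
    print(f"{rec['step']:>6} {rec['loss']:>10.4f} "
          f"{rec['token_acc']:>10.4f} {rec['learning_rate']:>12.3e}")

# Simple summary: lowest training loss logged so far.
best = min(history, key=lambda r: r["loss"])
print(f"lowest logged loss: {best['loss']:.4f} at step {best['step']}")

The same approach extends to plotting (e.g. loss versus step with matplotlib) or to comparing checkpoints, since every checkpoint saved by the trainer carries its own trainer_state.json with the log history up to that step.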