{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.027785495971103084,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0001389274798555154,
"eval_loss": 2.472825765609741,
"eval_runtime": 288.4466,
"eval_samples_per_second": 10.508,
"eval_steps_per_second": 5.256,
"step": 1
},
{
"epoch": 0.0006946373992775772,
"grad_norm": 0.7895943522453308,
"learning_rate": 5e-05,
"loss": 6.3296,
"step": 5
},
{
"epoch": 0.0013892747985551543,
"grad_norm": 1.3175629377365112,
"learning_rate": 0.0001,
"loss": 6.4692,
"step": 10
},
{
"epoch": 0.0020839121978327314,
"grad_norm": 1.6610268354415894,
"learning_rate": 9.98292246503335e-05,
"loss": 8.5633,
"step": 15
},
{
"epoch": 0.0027785495971103086,
"grad_norm": 1.8091799020767212,
"learning_rate": 9.931806517013612e-05,
"loss": 7.7043,
"step": 20
},
{
"epoch": 0.0034731869963878855,
"grad_norm": 2.3642632961273193,
"learning_rate": 9.847001329696653e-05,
"loss": 7.5649,
"step": 25
},
{
"epoch": 0.004167824395665463,
"grad_norm": 2.7922849655151367,
"learning_rate": 9.729086208503174e-05,
"loss": 6.599,
"step": 30
},
{
"epoch": 0.0048624617949430396,
"grad_norm": 3.5944840908050537,
"learning_rate": 9.578866633275288e-05,
"loss": 7.3915,
"step": 35
},
{
"epoch": 0.005557099194220617,
"grad_norm": 4.525637149810791,
"learning_rate": 9.397368756032445e-05,
"loss": 8.0343,
"step": 40
},
{
"epoch": 0.006251736593498194,
"grad_norm": 6.427048683166504,
"learning_rate": 9.185832391312644e-05,
"loss": 8.2018,
"step": 45
},
{
"epoch": 0.006946373992775771,
"grad_norm": 9.417752265930176,
"learning_rate": 8.945702546981969e-05,
"loss": 7.8271,
"step": 50
},
{
"epoch": 0.006946373992775771,
"eval_loss": 1.688758373260498,
"eval_runtime": 290.1662,
"eval_samples_per_second": 10.446,
"eval_steps_per_second": 5.225,
"step": 50
},
{
"epoch": 0.007641011392053348,
"grad_norm": 2.2982845306396484,
"learning_rate": 8.678619553365659e-05,
"loss": 4.8406,
"step": 55
},
{
"epoch": 0.008335648791330925,
"grad_norm": 1.8996556997299194,
"learning_rate": 8.386407858128706e-05,
"loss": 5.3847,
"step": 60
},
{
"epoch": 0.009030286190608503,
"grad_norm": 1.6495345830917358,
"learning_rate": 8.07106356344834e-05,
"loss": 5.992,
"step": 65
},
{
"epoch": 0.009724923589886079,
"grad_norm": 2.2762582302093506,
"learning_rate": 7.734740790612136e-05,
"loss": 6.4391,
"step": 70
},
{
"epoch": 0.010419560989163657,
"grad_norm": 1.997231364250183,
"learning_rate": 7.379736965185368e-05,
"loss": 6.304,
"step": 75
},
{
"epoch": 0.011114198388441235,
"grad_norm": 2.5489630699157715,
"learning_rate": 7.008477123264848e-05,
"loss": 6.3471,
"step": 80
},
{
"epoch": 0.01180883578771881,
"grad_norm": 4.514221668243408,
"learning_rate": 6.623497346023418e-05,
"loss": 6.3654,
"step": 85
},
{
"epoch": 0.012503473186996388,
"grad_norm": 6.375067234039307,
"learning_rate": 6.227427435703997e-05,
"loss": 6.3578,
"step": 90
},
{
"epoch": 0.013198110586273964,
"grad_norm": 10.424362182617188,
"learning_rate": 5.8229729514036705e-05,
"loss": 6.8525,
"step": 95
},
{
"epoch": 0.013892747985551542,
"grad_norm": 8.787095069885254,
"learning_rate": 5.4128967273616625e-05,
"loss": 6.9822,
"step": 100
},
{
"epoch": 0.013892747985551542,
"eval_loss": 1.5637677907943726,
"eval_runtime": 290.3574,
"eval_samples_per_second": 10.439,
"eval_steps_per_second": 5.221,
"step": 100
},
{
"epoch": 0.01458738538482912,
"grad_norm": 1.920885443687439,
"learning_rate": 5e-05,
"loss": 4.4138,
"step": 105
},
{
"epoch": 0.015282022784106696,
"grad_norm": 1.7754857540130615,
"learning_rate": 4.5871032726383386e-05,
"loss": 4.7733,
"step": 110
},
{
"epoch": 0.015976660183384273,
"grad_norm": 2.1266419887542725,
"learning_rate": 4.17702704859633e-05,
"loss": 5.9913,
"step": 115
},
{
"epoch": 0.01667129758266185,
"grad_norm": 2.144942045211792,
"learning_rate": 3.772572564296005e-05,
"loss": 6.1769,
"step": 120
},
{
"epoch": 0.01736593498193943,
"grad_norm": 2.360759973526001,
"learning_rate": 3.3765026539765834e-05,
"loss": 6.0735,
"step": 125
},
{
"epoch": 0.018060572381217006,
"grad_norm": 2.8511805534362793,
"learning_rate": 2.991522876735154e-05,
"loss": 6.0674,
"step": 130
},
{
"epoch": 0.01875520978049458,
"grad_norm": 3.699951410293579,
"learning_rate": 2.6202630348146324e-05,
"loss": 7.084,
"step": 135
},
{
"epoch": 0.019449847179772158,
"grad_norm": 6.2040557861328125,
"learning_rate": 2.2652592093878666e-05,
"loss": 6.0296,
"step": 140
},
{
"epoch": 0.020144484579049736,
"grad_norm": 6.199789047241211,
"learning_rate": 1.928936436551661e-05,
"loss": 6.3335,
"step": 145
},
{
"epoch": 0.020839121978327314,
"grad_norm": 12.588438034057617,
"learning_rate": 1.6135921418712956e-05,
"loss": 6.7239,
"step": 150
},
{
"epoch": 0.020839121978327314,
"eval_loss": 1.5134177207946777,
"eval_runtime": 290.0025,
"eval_samples_per_second": 10.452,
"eval_steps_per_second": 5.228,
"step": 150
},
{
"epoch": 0.02153375937760489,
"grad_norm": 1.827096939086914,
"learning_rate": 1.3213804466343421e-05,
"loss": 4.103,
"step": 155
},
{
"epoch": 0.02222839677688247,
"grad_norm": 1.7426300048828125,
"learning_rate": 1.0542974530180327e-05,
"loss": 4.4376,
"step": 160
},
{
"epoch": 0.022923034176160043,
"grad_norm": 1.9147125482559204,
"learning_rate": 8.141676086873572e-06,
"loss": 5.1879,
"step": 165
},
{
"epoch": 0.02361767157543762,
"grad_norm": 2.1786575317382812,
"learning_rate": 6.026312439675552e-06,
"loss": 6.0528,
"step": 170
},
{
"epoch": 0.0243123089747152,
"grad_norm": 3.0548410415649414,
"learning_rate": 4.2113336672471245e-06,
"loss": 5.7481,
"step": 175
},
{
"epoch": 0.025006946373992776,
"grad_norm": 2.6999576091766357,
"learning_rate": 2.7091379149682685e-06,
"loss": 6.2866,
"step": 180
},
{
"epoch": 0.025701583773270354,
"grad_norm": 4.492112636566162,
"learning_rate": 1.5299867030334814e-06,
"loss": 5.8095,
"step": 185
},
{
"epoch": 0.02639622117254793,
"grad_norm": 4.557647228240967,
"learning_rate": 6.819348298638839e-07,
"loss": 6.2617,
"step": 190
},
{
"epoch": 0.027090858571825506,
"grad_norm": 7.8843584060668945,
"learning_rate": 1.7077534966650766e-07,
"loss": 6.8044,
"step": 195
},
{
"epoch": 0.027785495971103084,
"grad_norm": 8.694259643554688,
"learning_rate": 0.0,
"loss": 7.3994,
"step": 200
},
{
"epoch": 0.027785495971103084,
"eval_loss": 1.5116981267929077,
"eval_runtime": 290.4363,
"eval_samples_per_second": 10.436,
"eval_steps_per_second": 5.22,
"step": 200
}
],
"logging_steps": 5,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.719421228993331e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
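
The JSON above is the standard Trainer state saved with this checkpoint. A minimal sketch for inspecting it, assuming the file is saved as trainer_state.json inside the checkpoint directory (the filename and path are assumptions; adjust to your local layout):

import json

# Assumed path: the usual location of the state file inside a checkpoint folder.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss" instead.
train_points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_points = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print("train loss by step:", train_points)
print("eval loss by step:", eval_points)

Read this way, the log_history shows eval_loss falling from 2.47 at step 1 to roughly 1.51 by step 200.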