Training in progress, step 72, checkpoint (commit f0f5ab3)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.019984733883838736,
"eval_steps": 9,
"global_step": 72,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0002775657483866491,
"eval_loss": 0.5042526125907898,
"eval_runtime": 655.4432,
"eval_samples_per_second": 9.258,
"eval_steps_per_second": 1.158,
"step": 1
},
{
"epoch": 0.0008326972451599472,
"grad_norm": 0.27662014961242676,
"learning_rate": 1.5e-05,
"loss": 0.526,
"step": 3
},
{
"epoch": 0.0016653944903198944,
"grad_norm": 0.27963921427726746,
"learning_rate": 3e-05,
"loss": 0.4897,
"step": 6
},
{
"epoch": 0.002498091735479842,
"grad_norm": 0.29799002408981323,
"learning_rate": 4.5e-05,
"loss": 0.5052,
"step": 9
},
{
"epoch": 0.002498091735479842,
"eval_loss": 0.4782797694206238,
"eval_runtime": 660.6166,
"eval_samples_per_second": 9.185,
"eval_steps_per_second": 1.149,
"step": 9
},
{
"epoch": 0.003330788980639789,
"grad_norm": 0.23466083407402039,
"learning_rate": 4.993910125649561e-05,
"loss": 0.4373,
"step": 12
},
{
"epoch": 0.004163486225799737,
"grad_norm": 0.20258335769176483,
"learning_rate": 4.962019382530521e-05,
"loss": 0.4476,
"step": 15
},
{
"epoch": 0.004996183470959684,
"grad_norm": 0.22826525568962097,
"learning_rate": 4.9031542398457974e-05,
"loss": 0.4442,
"step": 18
},
{
"epoch": 0.004996183470959684,
"eval_loss": 0.42058902978897095,
"eval_runtime": 660.3378,
"eval_samples_per_second": 9.189,
"eval_steps_per_second": 1.149,
"step": 18
},
{
"epoch": 0.0058288807161196304,
"grad_norm": 0.20002856850624084,
"learning_rate": 4.817959636416969e-05,
"loss": 0.4395,
"step": 21
},
{
"epoch": 0.006661577961279578,
"grad_norm": 0.21906918287277222,
"learning_rate": 4.707368982147318e-05,
"loss": 0.423,
"step": 24
},
{
"epoch": 0.007494275206439525,
"grad_norm": 0.1747865527868271,
"learning_rate": 4.572593931387604e-05,
"loss": 0.4055,
"step": 27
},
{
"epoch": 0.007494275206439525,
"eval_loss": 0.39028802514076233,
"eval_runtime": 660.3218,
"eval_samples_per_second": 9.189,
"eval_steps_per_second": 1.149,
"step": 27
},
{
"epoch": 0.008326972451599473,
"grad_norm": 0.15783219039440155,
"learning_rate": 4.415111107797445e-05,
"loss": 0.3895,
"step": 30
},
{
"epoch": 0.00915966969675942,
"grad_norm": 0.17900213599205017,
"learning_rate": 4.2366459261474933e-05,
"loss": 0.4093,
"step": 33
},
{
"epoch": 0.009992366941919368,
"grad_norm": 0.14911402761936188,
"learning_rate": 4.039153688314145e-05,
"loss": 0.3543,
"step": 36
},
{
"epoch": 0.009992366941919368,
"eval_loss": 0.3740505874156952,
"eval_runtime": 660.2612,
"eval_samples_per_second": 9.19,
"eval_steps_per_second": 1.15,
"step": 36
},
{
"epoch": 0.010825064187079314,
"grad_norm": 0.19286595284938812,
"learning_rate": 3.824798160583012e-05,
"loss": 0.378,
"step": 39
},
{
"epoch": 0.011657761432239261,
"grad_norm": 0.20762427151203156,
"learning_rate": 3.5959278669726935e-05,
"loss": 0.4039,
"step": 42
},
{
"epoch": 0.012490458677399209,
"grad_norm": 0.1565249115228653,
"learning_rate": 3.355050358314172e-05,
"loss": 0.3701,
"step": 45
},
{
"epoch": 0.012490458677399209,
"eval_loss": 0.36458876729011536,
"eval_runtime": 659.9797,
"eval_samples_per_second": 9.194,
"eval_steps_per_second": 1.15,
"step": 45
},
{
"epoch": 0.013323155922559156,
"grad_norm": 0.1587727814912796,
"learning_rate": 3.104804738999169e-05,
"loss": 0.363,
"step": 48
},
{
"epoch": 0.014155853167719104,
"grad_norm": 0.19063134491443634,
"learning_rate": 2.8479327524001636e-05,
"loss": 0.3633,
"step": 51
},
{
"epoch": 0.01498855041287905,
"grad_norm": 0.16761146485805511,
"learning_rate": 2.587248741756253e-05,
"loss": 0.3519,
"step": 54
},
{
"epoch": 0.01498855041287905,
"eval_loss": 0.3595520257949829,
"eval_runtime": 660.1365,
"eval_samples_per_second": 9.192,
"eval_steps_per_second": 1.15,
"step": 54
},
{
"epoch": 0.015821247658039,
"grad_norm": 0.17183250188827515,
"learning_rate": 2.3256088156396868e-05,
"loss": 0.3577,
"step": 57
},
{
"epoch": 0.016653944903198947,
"grad_norm": 0.18601562082767487,
"learning_rate": 2.0658795558326743e-05,
"loss": 0.3407,
"step": 60
},
{
"epoch": 0.01748664214835889,
"grad_norm": 0.17450034618377686,
"learning_rate": 1.8109066104575023e-05,
"loss": 0.3614,
"step": 63
},
{
"epoch": 0.01748664214835889,
"eval_loss": 0.35599005222320557,
"eval_runtime": 659.9247,
"eval_samples_per_second": 9.195,
"eval_steps_per_second": 1.15,
"step": 63
},
{
"epoch": 0.01831933939351884,
"grad_norm": 0.1783250868320465,
"learning_rate": 1.56348351646022e-05,
"loss": 0.357,
"step": 66
},
{
"epoch": 0.019152036638678788,
"grad_norm": 0.1924220472574234,
"learning_rate": 1.3263210930352737e-05,
"loss": 0.364,
"step": 69
},
{
"epoch": 0.019984733883838736,
"grad_norm": 0.1912061870098114,
"learning_rate": 1.1020177413231334e-05,
"loss": 0.3565,
"step": 72
},
{
"epoch": 0.019984733883838736,
"eval_loss": 0.35400116443634033,
"eval_runtime": 660.0962,
"eval_samples_per_second": 9.193,
"eval_steps_per_second": 1.15,
"step": 72
}
],
"logging_steps": 3,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 9,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.5693418665187738e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
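
The log_history array above interleaves training records (keyed by "loss", logged every 3 steps) with evaluation records (keyed by "eval_loss", logged every 9 steps). A minimal sketch of how one might inspect this file, assuming it is saved as trainer_state.json inside the checkpoint directory (the checkpoint-72 path is an assumption for illustration):

import json

# Load the trainer state written by the Hugging Face Trainer for this checkpoint.
with open("checkpoint-72/trainer_state.json") as f:
    state = json.load(f)

# Split log_history into training and evaluation records.
train_logs = [entry for entry in state["log_history"] if "loss" in entry]
eval_logs = [entry for entry in state["log_history"] if "eval_loss" in entry]

# Print the eval-loss trajectory (0.504 at step 1 down to 0.354 at step 72).
for entry in eval_logs:
    print(f"step {entry['step']:>3}: eval_loss = {entry['eval_loss']:.4f}")

print(f"progress: step {state['global_step']} of {state['max_steps']}")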