LIMO / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.8461538461538463,
"eval_steps": 500,
"global_step": 24,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11538461538461539,
"grad_norm": 6.456618785858154,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.9054,
"step": 1
},
{
"epoch": 0.23076923076923078,
"grad_norm": 6.547460556030273,
"learning_rate": 6.666666666666667e-06,
"loss": 0.9154,
"step": 2
},
{
"epoch": 0.34615384615384615,
"grad_norm": 5.908721923828125,
"learning_rate": 1e-05,
"loss": 0.8793,
"step": 3
},
{
"epoch": 0.46153846153846156,
"grad_norm": 2.536839008331299,
"learning_rate": 9.944154131125643e-06,
"loss": 0.8316,
"step": 4
},
{
"epoch": 0.5769230769230769,
"grad_norm": 4.8492865562438965,
"learning_rate": 9.777864028930705e-06,
"loss": 0.8177,
"step": 5
},
{
"epoch": 0.6923076923076923,
"grad_norm": 5.343000411987305,
"learning_rate": 9.504844339512096e-06,
"loss": 0.8852,
"step": 6
},
{
"epoch": 0.8076923076923077,
"grad_norm": 5.4906439781188965,
"learning_rate": 9.131193871579975e-06,
"loss": 0.8383,
"step": 7
},
{
"epoch": 0.9230769230769231,
"grad_norm": 3.948390245437622,
"learning_rate": 8.665259359149132e-06,
"loss": 0.7625,
"step": 8
},
{
"epoch": 1.0769230769230769,
"grad_norm": 4.181735992431641,
"learning_rate": 8.117449009293668e-06,
"loss": 1.2059,
"step": 9
},
{
"epoch": 1.1923076923076923,
"grad_norm": 1.8489601612091064,
"learning_rate": 7.500000000000001e-06,
"loss": 0.7324,
"step": 10
},
{
"epoch": 1.3076923076923077,
"grad_norm": 1.8061974048614502,
"learning_rate": 6.8267051218319766e-06,
"loss": 0.7359,
"step": 11
},
{
"epoch": 1.4230769230769231,
"grad_norm": 1.7004175186157227,
"learning_rate": 6.112604669781572e-06,
"loss": 0.7131,
"step": 12
},
{
"epoch": 1.5384615384615383,
"grad_norm": 1.1723263263702393,
"learning_rate": 5.373650467932122e-06,
"loss": 0.6444,
"step": 13
},
{
"epoch": 1.6538461538461537,
"grad_norm": 1.2155582904815674,
"learning_rate": 4.626349532067879e-06,
"loss": 0.7125,
"step": 14
},
{
"epoch": 1.7692307692307692,
"grad_norm": 0.947443425655365,
"learning_rate": 3.887395330218429e-06,
"loss": 0.6467,
"step": 15
},
{
"epoch": 1.8846153846153846,
"grad_norm": 0.8171675801277161,
"learning_rate": 3.173294878168025e-06,
"loss": 0.6705,
"step": 16
},
{
"epoch": 2.0384615384615383,
"grad_norm": 1.1855154037475586,
"learning_rate": 2.5000000000000015e-06,
"loss": 1.0619,
"step": 17
},
{
"epoch": 2.1538461538461537,
"grad_norm": 0.6295896172523499,
"learning_rate": 1.8825509907063328e-06,
"loss": 0.6591,
"step": 18
},
{
"epoch": 2.269230769230769,
"grad_norm": 0.5844054222106934,
"learning_rate": 1.3347406408508695e-06,
"loss": 0.5755,
"step": 19
},
{
"epoch": 2.3846153846153846,
"grad_norm": 0.5806536078453064,
"learning_rate": 8.688061284200266e-07,
"loss": 0.6509,
"step": 20
},
{
"epoch": 2.5,
"grad_norm": 0.5342062711715698,
"learning_rate": 4.951556604879049e-07,
"loss": 0.6663,
"step": 21
},
{
"epoch": 2.6153846153846154,
"grad_norm": 0.4949410855770111,
"learning_rate": 2.2213597106929608e-07,
"loss": 0.6374,
"step": 22
},
{
"epoch": 2.730769230769231,
"grad_norm": 0.5183479189872742,
"learning_rate": 5.584586887435739e-08,
"loss": 0.6574,
"step": 23
},
{
"epoch": 2.8461538461538463,
"grad_norm": 0.4780174195766449,
"learning_rate": 0.0,
"loss": 0.6619,
"step": 24
},
{
"epoch": 2.8461538461538463,
"step": 24,
"total_flos": 31498096181248.0,
"train_loss": 0.769462858637174,
"train_runtime": 692.903,
"train_samples_per_second": 3.537,
"train_steps_per_second": 0.035
}
],
"logging_steps": 1.0,
"max_steps": 24,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 31498096181248.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
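
For reference, below is a minimal sketch (not part of the original repository) of how a Trainer-produced `trainer_state.json` like the one above can be inspected with Python. The file path is an assumption, and the 3-step linear warmup plus cosine decay checked at the end is inferred from the logged `learning_rate` values rather than stored anywhere in this file.

```python
# Minimal sketch: parse a Hugging Face Trainer "trainer_state.json" and
# print the per-step log plus the final training summary.
# The path below is an assumption; point it at the downloaded file.
import json
import math

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step records live in "log_history"; the last entry is the
# aggregate summary (train_loss, train_runtime, ...).
step_logs = [e for e in state["log_history"]
             if "loss" in e and "learning_rate" in e]
summary = state["log_history"][-1]

for e in step_logs:
    print(f"step {e['step']:>2}  epoch {e['epoch']:.3f}  "
          f"lr {e['learning_rate']:.3e}  loss {e['loss']:.4f}")

print(f"mean train loss {summary['train_loss']:.4f} over "
      f"{state['global_step']} steps in {summary['train_runtime']:.1f}s")

# Assumption, inferred from the logged values rather than stored in the
# file: a 3-step linear warmup to 1e-05 followed by cosine decay to 0
# at step 24.
peak_lr, warmup_steps, total_steps = 1e-05, 3, state["max_steps"]

def expected_lr(step: int) -> float:
    if step <= warmup_steps:
        return peak_lr * step / warmup_steps
    progress = (step - warmup_steps) / (total_steps - warmup_steps)
    return peak_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

for e in step_logs:
    assert math.isclose(e["learning_rate"], expected_lr(e["step"]),
                        rel_tol=1e-6, abs_tol=1e-12)
```

Run next to the downloaded file; the final loop simply confirms that the logged learning rates are consistent with a linear warmup to 1e-05 over the first 3 steps followed by cosine decay to zero at step 24.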