{
"best_metric": 0.941358024691358,
"best_model_checkpoint": "convnext-tiny-224-finetuned-eurosat/checkpoint-171",
"epoch": 3.0,
"eval_steps": 500,
"global_step": 171,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.17543859649122806,
"grad_norm": 1.6056560277938843,
"learning_rate": 2.777777777777778e-05,
"loss": 2.3048,
"step": 10
},
{
"epoch": 0.3508771929824561,
"grad_norm": 3.1393508911132812,
"learning_rate": 4.9346405228758174e-05,
"loss": 2.1547,
"step": 20
},
{
"epoch": 0.5263157894736842,
"grad_norm": 2.2008461952209473,
"learning_rate": 4.607843137254902e-05,
"loss": 1.873,
"step": 30
},
{
"epoch": 0.7017543859649122,
"grad_norm": 2.88468861579895,
"learning_rate": 4.281045751633987e-05,
"loss": 1.5467,
"step": 40
},
{
"epoch": 0.8771929824561403,
"grad_norm": 4.3117899894714355,
"learning_rate": 3.954248366013072e-05,
"loss": 1.2852,
"step": 50
},
{
"epoch": 1.0,
"eval_accuracy": 0.8728395061728395,
"eval_loss": 0.9943345189094543,
"eval_runtime": 112.9902,
"eval_samples_per_second": 14.338,
"eval_steps_per_second": 0.23,
"step": 57
},
{
"epoch": 1.0526315789473684,
"grad_norm": 2.6797170639038086,
"learning_rate": 3.627450980392157e-05,
"loss": 1.0323,
"step": 60
},
{
"epoch": 1.2280701754385965,
"grad_norm": 3.286217212677002,
"learning_rate": 3.300653594771242e-05,
"loss": 0.8614,
"step": 70
},
{
"epoch": 1.4035087719298245,
"grad_norm": 3.1749117374420166,
"learning_rate": 2.9738562091503268e-05,
"loss": 0.7304,
"step": 80
},
{
"epoch": 1.5789473684210527,
"grad_norm": 2.400712728500366,
"learning_rate": 2.647058823529412e-05,
"loss": 0.6561,
"step": 90
},
{
"epoch": 1.7543859649122808,
"grad_norm": 3.6787238121032715,
"learning_rate": 2.320261437908497e-05,
"loss": 0.576,
"step": 100
},
{
"epoch": 1.9298245614035088,
"grad_norm": 3.9098289012908936,
"learning_rate": 1.993464052287582e-05,
"loss": 0.5203,
"step": 110
},
{
"epoch": 2.0,
"eval_accuracy": 0.932716049382716,
"eval_loss": 0.4477611482143402,
"eval_runtime": 115.8395,
"eval_samples_per_second": 13.985,
"eval_steps_per_second": 0.224,
"step": 114
},
{
"epoch": 2.1052631578947367,
"grad_norm": 1.8085237741470337,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.4635,
"step": 120
},
{
"epoch": 2.280701754385965,
"grad_norm": 2.8854968547821045,
"learning_rate": 1.3398692810457516e-05,
"loss": 0.4567,
"step": 130
},
{
"epoch": 2.456140350877193,
"grad_norm": 3.9845328330993652,
"learning_rate": 1.0130718954248367e-05,
"loss": 0.3967,
"step": 140
},
{
"epoch": 2.6315789473684212,
"grad_norm": 3.0889475345611572,
"learning_rate": 6.862745098039216e-06,
"loss": 0.4008,
"step": 150
},
{
"epoch": 2.807017543859649,
"grad_norm": 3.4937639236450195,
"learning_rate": 3.5947712418300652e-06,
"loss": 0.3938,
"step": 160
},
{
"epoch": 2.982456140350877,
"grad_norm": 4.640423774719238,
"learning_rate": 3.2679738562091505e-07,
"loss": 0.3931,
"step": 170
},
{
"epoch": 3.0,
"eval_accuracy": 0.941358024691358,
"eval_loss": 0.338970422744751,
"eval_runtime": 108.2196,
"eval_samples_per_second": 14.97,
"eval_steps_per_second": 0.24,
"step": 171
},
{
"epoch": 3.0,
"step": 171,
"total_flos": 1.0993259373775258e+18,
"train_loss": 0.9407536648867423,
"train_runtime": 3757.4507,
"train_samples_per_second": 11.641,
"train_steps_per_second": 0.046
}
],
"logging_steps": 10,
"max_steps": 171,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.0993259373775258e+18,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}