{
"best_metric": 0.821917808219178,
"best_model_checkpoint": "tiny-bert-sst2-distilled/run-3/checkpoint-3830",
"epoch": 9.0,
"eval_steps": 500,
"global_step": 6894,
"is_hyper_param_search": true,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.0,
"grad_norm": 2.53190279006958,
"learning_rate": 0.00016492442050886904,
"loss": 0.4371,
"step": 766
},
{
"epoch": 1.0,
"eval_accuracy": 0.7475538160469667,
"eval_f1": 0.7465618860510805,
"eval_loss": 0.4075860381126404,
"eval_precision": 0.7495069033530573,
"eval_recall": 0.7436399217221135,
"eval_runtime": 132.2506,
"eval_samples_per_second": 7.728,
"eval_steps_per_second": 1.936,
"step": 766
},
{
"epoch": 2.0,
"grad_norm": 15.610742568969727,
"learning_rate": 0.00014430886794526042,
"loss": 0.4097,
"step": 1532
},
{
"epoch": 2.0,
"eval_accuracy": 0.7818003913894325,
"eval_f1": 0.81053525913339,
"eval_loss": 0.3935793936252594,
"eval_precision": 0.7162162162162162,
"eval_recall": 0.9334637964774951,
"eval_runtime": 132.8723,
"eval_samples_per_second": 7.692,
"eval_steps_per_second": 1.927,
"step": 1532
},
{
"epoch": 3.0,
"grad_norm": 2.8801589012145996,
"learning_rate": 0.00012369331538165178,
"loss": 0.3882,
"step": 2298
},
{
"epoch": 3.0,
"eval_accuracy": 0.8023483365949119,
"eval_f1": 0.7971887550200805,
"eval_loss": 0.39150404930114746,
"eval_precision": 0.8185567010309278,
"eval_recall": 0.776908023483366,
"eval_runtime": 132.9694,
"eval_samples_per_second": 7.686,
"eval_steps_per_second": 1.925,
"step": 2298
},
{
"epoch": 4.0,
"grad_norm": 3.221484422683716,
"learning_rate": 0.00010307776281804316,
"loss": 0.382,
"step": 3064
},
{
"epoch": 4.0,
"eval_accuracy": 0.8052837573385518,
"eval_f1": 0.8039408866995074,
"eval_loss": 0.38070735335350037,
"eval_precision": 0.8095238095238095,
"eval_recall": 0.7984344422700587,
"eval_runtime": 132.6071,
"eval_samples_per_second": 7.707,
"eval_steps_per_second": 1.931,
"step": 3064
},
{
"epoch": 5.0,
"grad_norm": 23.843862533569336,
"learning_rate": 8.246221025443452e-05,
"loss": 0.3666,
"step": 3830
},
{
"epoch": 5.0,
"eval_accuracy": 0.821917808219178,
"eval_f1": 0.8266666666666668,
"eval_loss": 0.3845100998878479,
"eval_precision": 0.8051948051948052,
"eval_recall": 0.8493150684931506,
"eval_runtime": 131.7757,
"eval_samples_per_second": 7.756,
"eval_steps_per_second": 1.943,
"step": 3830
},
{
"epoch": 6.0,
"grad_norm": 4.022406578063965,
"learning_rate": 6.184665769082589e-05,
"loss": 0.3653,
"step": 4596
},
{
"epoch": 6.0,
"eval_accuracy": 0.8209393346379648,
"eval_f1": 0.8337874659400546,
"eval_loss": 0.3770919144153595,
"eval_precision": 0.7779661016949152,
"eval_recall": 0.898238747553816,
"eval_runtime": 132.4094,
"eval_samples_per_second": 7.718,
"eval_steps_per_second": 1.933,
"step": 4596
},
{
"epoch": 7.0,
"grad_norm": 4.303035259246826,
"learning_rate": 4.123110512721726e-05,
"loss": 0.3603,
"step": 5362
},
{
"epoch": 7.0,
"eval_accuracy": 0.8072407045009785,
"eval_f1": 0.826431718061674,
"eval_loss": 0.39235764741897583,
"eval_precision": 0.7516025641025641,
"eval_recall": 0.9178082191780822,
"eval_runtime": 133.9227,
"eval_samples_per_second": 7.631,
"eval_steps_per_second": 1.912,
"step": 5362
},
{
"epoch": 8.0,
"grad_norm": 1.2834490537643433,
"learning_rate": 2.061555256360863e-05,
"loss": 0.356,
"step": 6128
},
{
"epoch": 8.0,
"eval_accuracy": 0.8170254403131115,
"eval_f1": 0.8285976168652612,
"eval_loss": 0.38242313265800476,
"eval_precision": 0.7793103448275862,
"eval_recall": 0.8845401174168297,
"eval_runtime": 133.4034,
"eval_samples_per_second": 7.661,
"eval_steps_per_second": 1.919,
"step": 6128
},
{
"epoch": 9.0,
"grad_norm": 0.4201391637325287,
"learning_rate": 0.0,
"loss": 0.3548,
"step": 6894
},
{
"epoch": 9.0,
"eval_accuracy": 0.8170254403131115,
"eval_f1": 0.827966881324747,
"eval_loss": 0.38190174102783203,
"eval_precision": 0.78125,
"eval_recall": 0.8806262230919765,
"eval_runtime": 133.7977,
"eval_samples_per_second": 7.638,
"eval_steps_per_second": 1.913,
"step": 6894
}
],
"logging_steps": 500,
"max_steps": 6894,
"num_input_tokens_seen": 0,
"num_train_epochs": 9,
"save_steps": 500,
"total_flos": 2121256775520.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": {
"alpha": 0.6411063430664922,
"learning_rate": 0.00018553997307247768,
"num_train_epochs": 9,
"temperature": 23
}
}