{
  "best_metric": 0.7671232876712328,
  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-16/checkpoint-144",
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 192,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 1.2878928184509277,
      "learning_rate": 0.00013115653902338105,
      "loss": 0.4962,
      "step": 48
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.700587084148728,
      "eval_f1": 0.662251655629139,
      "eval_loss": 0.4426267147064209,
      "eval_precision": 0.759493670886076,
      "eval_recall": 0.5870841487279843,
      "eval_runtime": 27.3,
      "eval_samples_per_second": 37.436,
      "eval_steps_per_second": 0.586,
      "step": 48
    },
    {
      "epoch": 2.0,
      "grad_norm": 3.258302688598633,
      "learning_rate": 0.00011241989059146946,
      "loss": 0.4374,
      "step": 96
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.6898238747553816,
      "eval_f1": 0.6248520710059171,
      "eval_loss": 0.41270217299461365,
      "eval_precision": 0.7904191616766467,
      "eval_recall": 0.5166340508806262,
      "eval_runtime": 27.1797,
      "eval_samples_per_second": 37.602,
      "eval_steps_per_second": 0.589,
      "step": 96
    },
    {
      "epoch": 3.0,
      "grad_norm": 1.6551486253738403,
      "learning_rate": 9.368324215955789e-05,
      "loss": 0.4148,
      "step": 144
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.7671232876712328,
      "eval_f1": 0.7643564356435644,
      "eval_loss": 0.39426228404045105,
      "eval_precision": 0.7735470941883767,
      "eval_recall": 0.7553816046966731,
      "eval_runtime": 28.5963,
      "eval_samples_per_second": 35.739,
      "eval_steps_per_second": 0.56,
      "step": 144
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.3516536951065063,
      "learning_rate": 7.494659372764631e-05,
      "loss": 0.4044,
      "step": 192
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7632093933463796,
      "eval_f1": 0.7535641547861507,
      "eval_loss": 0.3918641209602356,
      "eval_precision": 0.7855626326963907,
      "eval_recall": 0.7240704500978473,
      "eval_runtime": 27.1274,
      "eval_samples_per_second": 37.674,
      "eval_steps_per_second": 0.59,
      "step": 192
    }
  ],
  "logging_steps": 500,
  "max_steps": 384,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "total_flos": 942780789120.0,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.6758158495370239,
    "learning_rate": 0.00014989318745529262,
    "num_train_epochs": 8,
    "temperature": 20
  }
}