{
  "best_metric": 0.8013698630136986,
  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-17/checkpoint-192",
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 192,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 1.7164396047592163,
      "learning_rate": 0.0007649073963933848,
      "loss": 0.5715,
      "step": 48
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7397260273972602,
      "eval_f1": 0.7861736334405145,
      "eval_loss": 0.5001023411750793,
      "eval_precision": 0.6671214188267395,
      "eval_recall": 0.9569471624266145,
      "eval_runtime": 27.2812,
      "eval_samples_per_second": 37.462,
      "eval_steps_per_second": 0.586,
      "step": 48
    },
    {
      "epoch": 2.0,
      "grad_norm": 2.8946006298065186,
      "learning_rate": 0.0005736805472950385,
      "loss": 0.4948,
      "step": 96
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7455968688845401,
      "eval_f1": 0.7893030794165317,
      "eval_loss": 0.4874393045902252,
      "eval_precision": 0.673582295988935,
      "eval_recall": 0.9530332681017613,
      "eval_runtime": 27.1224,
      "eval_samples_per_second": 37.681,
      "eval_steps_per_second": 0.59,
      "step": 96
    },
    {
      "epoch": 3.0,
      "grad_norm": 2.64782452583313,
      "learning_rate": 0.0003824536981966924,
      "loss": 0.4839,
      "step": 144
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.7661448140900196,
      "eval_f1": 0.8023159636062862,
      "eval_loss": 0.4675934612751007,
      "eval_precision": 0.6948424068767909,
      "eval_recall": 0.949119373776908,
      "eval_runtime": 26.9999,
      "eval_samples_per_second": 37.852,
      "eval_steps_per_second": 0.593,
      "step": 144
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.7006142139434814,
      "learning_rate": 0.0001912268490983462,
      "loss": 0.4531,
      "step": 192
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8013698630136986,
      "eval_f1": 0.8031037827352084,
      "eval_loss": 0.4373694658279419,
      "eval_precision": 0.7961538461538461,
      "eval_recall": 0.8101761252446184,
      "eval_runtime": 27.2608,
      "eval_samples_per_second": 37.49,
      "eval_steps_per_second": 0.587,
      "step": 192
    }
  ],
  "logging_steps": 500,
  "max_steps": 240,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 942780789120.0,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.8956137687880612,
    "learning_rate": 0.000956134245491731,
    "num_train_epochs": 5,
    "temperature": 12
  }
}