{
  "best_metric": 0.8165137614678899,
  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-1/checkpoint-2108",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 2635,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 6.5936455726623535,
      "learning_rate": 3.3598671878261075e-05,
      "loss": 2.2445,
      "step": 527
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.786697247706422,
      "eval_loss": 1.6360795497894287,
      "eval_runtime": 2.8227,
      "eval_samples_per_second": 308.927,
      "eval_steps_per_second": 2.48,
      "step": 527
    },
    {
      "epoch": 2.0,
      "grad_norm": Infinity,
      "learning_rate": 2.5230881206493013e-05,
      "loss": 1.3366,
      "step": 1054
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.805045871559633,
      "eval_loss": 1.3841164112091064,
      "eval_runtime": 2.8165,
      "eval_samples_per_second": 309.6,
      "eval_steps_per_second": 2.485,
      "step": 1054
    },
    {
      "epoch": 3.0,
      "grad_norm": 35.57567596435547,
      "learning_rate": 1.6831213236927748e-05,
      "loss": 1.0583,
      "step": 1581
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8027522935779816,
      "eval_loss": 1.3448162078857422,
      "eval_runtime": 2.8149,
      "eval_samples_per_second": 309.783,
      "eval_steps_per_second": 2.487,
      "step": 1581
    },
    {
      "epoch": 4.0,
      "grad_norm": 18.64527702331543,
      "learning_rate": 8.431545267362479e-06,
      "loss": 0.931,
      "step": 2108
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8165137614678899,
      "eval_loss": 1.3177071809768677,
      "eval_runtime": 2.8202,
      "eval_samples_per_second": 309.195,
      "eval_steps_per_second": 2.482,
      "step": 2108
    },
    {
      "epoch": 5.0,
      "grad_norm": 16.987468719482422,
      "learning_rate": 3.187729779721164e-08,
      "loss": 0.8787,
      "step": 2635
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8153669724770642,
      "eval_loss": 1.315588116645813,
      "eval_runtime": 2.8163,
      "eval_samples_per_second": 309.628,
      "eval_steps_per_second": 2.486,
      "step": 2635
    }
  ],
  "logging_steps": 500,
  "max_steps": 2635,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 40447180739880.0,
  "train_batch_size": 128,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.4702639641604981,
    "learning_rate": 4.199833984782634e-05,
    "num_train_epochs": 5,
    "temperature": 22
  }
}