{
  "best_metric": 0.815068493150685,
  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-16/checkpoint-3064",
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 3064,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 4.586636066436768,
      "learning_rate": 0.0002815362185318002,
      "loss": 0.5012,
      "step": 766
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7847358121330724,
      "eval_f1": 0.8090277777777778,
      "eval_loss": 0.4424646198749542,
      "eval_precision": 0.7269890795631825,
      "eval_recall": 0.9119373776908023,
      "eval_runtime": 131.6873,
      "eval_samples_per_second": 7.761,
      "eval_steps_per_second": 1.944,
      "step": 766
    },
    {
      "epoch": 2.0,
      "grad_norm": 4.197028160095215,
      "learning_rate": 0.00021115216389885013,
      "loss": 0.4492,
      "step": 1532
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.799412915851272,
      "eval_f1": 0.8064211520302172,
      "eval_loss": 0.440277099609375,
      "eval_precision": 0.7791970802919708,
      "eval_recall": 0.8356164383561644,
      "eval_runtime": 131.1398,
      "eval_samples_per_second": 7.793,
      "eval_steps_per_second": 1.952,
      "step": 1532
    },
    {
      "epoch": 3.0,
      "grad_norm": 2.803006172180176,
      "learning_rate": 0.0001407681092659001,
      "loss": 0.4291,
      "step": 2298
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.812133072407045,
      "eval_f1": 0.8218923933209648,
      "eval_loss": 0.4386424720287323,
      "eval_precision": 0.781305114638448,
      "eval_recall": 0.8669275929549902,
      "eval_runtime": 130.5843,
      "eval_samples_per_second": 7.826,
      "eval_steps_per_second": 1.96,
      "step": 2298
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.590163230895996,
      "learning_rate": 7.038405463295006e-05,
      "loss": 0.4241,
      "step": 3064
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.815068493150685,
      "eval_f1": 0.8177434908389585,
      "eval_loss": 0.4371667206287384,
      "eval_precision": 0.8060836501901141,
      "eval_recall": 0.8297455968688845,
      "eval_runtime": 130.7093,
      "eval_samples_per_second": 7.819,
      "eval_steps_per_second": 1.959,
      "step": 3064
    }
  ],
  "logging_steps": 500,
  "max_steps": 3830,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 942780789120.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.7716344444380391,
    "learning_rate": 0.00035192027316475024,
    "num_train_epochs": 5,
    "temperature": 10
  }
}