{
  "best_metric": 0.6534653465346535,
  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-6/checkpoint-976",
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 976,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 0.8588551878929138,
      "learning_rate": 0.0001547989976805511,
      "loss": 0.5028,
      "step": 244
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.5089108910891089,
      "eval_f1": 0.1205673758865248,
      "eval_loss": 0.504381000995636,
      "eval_precision": 0.5666666666666667,
      "eval_recall": 0.06746031746031746,
      "eval_runtime": 14.9415,
      "eval_samples_per_second": 33.799,
      "eval_steps_per_second": 1.071,
      "step": 244
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.0281460285186768,
      "learning_rate": 0.00012899916473379258,
      "loss": 0.4912,
      "step": 488
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.5782178217821782,
      "eval_f1": 0.4714640198511167,
      "eval_loss": 0.484104186296463,
      "eval_precision": 0.6291390728476821,
      "eval_recall": 0.376984126984127,
      "eval_runtime": 15.1104,
      "eval_samples_per_second": 33.421,
      "eval_steps_per_second": 1.059,
      "step": 488
    },
    {
      "epoch": 3.0,
      "grad_norm": 1.708383321762085,
      "learning_rate": 0.00010319933178703406,
      "loss": 0.4797,
      "step": 732
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.6415841584158416,
      "eval_f1": 0.6578449905482042,
      "eval_loss": 0.4840504229068756,
      "eval_precision": 0.628158844765343,
      "eval_recall": 0.6904761904761905,
      "eval_runtime": 14.8003,
      "eval_samples_per_second": 34.121,
      "eval_steps_per_second": 1.081,
      "step": 732
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.030291199684143,
      "learning_rate": 7.739949884027555e-05,
      "loss": 0.4669,
      "step": 976
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6534653465346535,
      "eval_f1": 0.6777163904235726,
      "eval_loss": 0.47764360904693604,
      "eval_precision": 0.6323024054982818,
      "eval_recall": 0.7301587301587301,
      "eval_runtime": 15.0188,
      "eval_samples_per_second": 33.624,
      "eval_steps_per_second": 1.065,
      "step": 976
    }
  ],
  "logging_steps": 500,
  "max_steps": 1708,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 500,
  "total_flos": 3111373132560.0,
  "train_batch_size": 39,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.6658046915260407,
    "learning_rate": 0.0001805988306273096,
    "num_train_epochs": 7,
    "per_device_train_batch_size": 39,
    "temperature": 15
  }
}
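
As a minimal sketch of how this trainer state could be inspected (assuming the file above is saved locally as trainer_state.json; the filename and script are illustrative and not part of the original run), the per-epoch evaluation records in log_history can be filtered and the best epoch located by eval_accuracy:

import json

# Load the Trainer state shown above; the filename is an assumption for illustration.
with open("trainer_state.json") as f:
    state = json.load(f)

# Evaluation entries carry eval_* keys; training-loss entries do not.
eval_logs = [entry for entry in state["log_history"] if "eval_accuracy" in entry]

for entry in eval_logs:
    print(f"epoch {entry['epoch']:.0f}: "
          f"accuracy={entry['eval_accuracy']:.4f}, f1={entry['eval_f1']:.4f}")

# The epoch with the highest eval_accuracy; this matches best_metric above.
best = max(eval_logs, key=lambda e: e["eval_accuracy"])
print("best epoch:", best["epoch"], "best_metric:", state["best_metric"])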