{
  "best_metric": 0.8307240704500979,
  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-5/checkpoint-864",
  "epoch": 9.0,
  "eval_steps": 500,
  "global_step": 864,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 4.850555896759033,
      "learning_rate": 0.00013535007385325417,
      "loss": 0.604,
      "step": 96
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7485322896281801,
      "eval_f1": 0.7925746569814367,
      "eval_loss": 0.5116264820098877,
      "eval_precision": 0.6744505494505495,
      "eval_recall": 0.9608610567514677,
      "eval_runtime": 25.4927,
      "eval_samples_per_second": 40.09,
      "eval_steps_per_second": 1.255,
      "step": 96
    },
    {
      "epoch": 2.0,
      "grad_norm": 2.2508227825164795,
      "learning_rate": 0.0001184313146215974,
      "loss": 0.4897,
      "step": 192
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7984344422700587,
      "eval_f1": 0.8214904679376083,
      "eval_loss": 0.4436704218387604,
      "eval_precision": 0.7371695178849145,
      "eval_recall": 0.9275929549902152,
      "eval_runtime": 26.2196,
      "eval_samples_per_second": 38.979,
      "eval_steps_per_second": 1.22,
      "step": 192
    },
    {
      "epoch": 3.0,
      "grad_norm": 4.221353530883789,
      "learning_rate": 0.00010151255538994063,
      "loss": 0.4449,
      "step": 288
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.812133072407045,
      "eval_f1": 0.8181818181818181,
      "eval_loss": 0.43934348225593567,
      "eval_precision": 0.7926605504587156,
      "eval_recall": 0.8454011741682974,
      "eval_runtime": 25.7442,
      "eval_samples_per_second": 39.698,
      "eval_steps_per_second": 1.243,
      "step": 288
    },
    {
      "epoch": 4.0,
      "grad_norm": 8.505247116088867,
      "learning_rate": 8.459379615828387e-05,
      "loss": 0.4275,
      "step": 384
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8170254403131115,
      "eval_f1": 0.8257222739981361,
      "eval_loss": 0.41963326930999756,
      "eval_precision": 0.7882562277580071,
      "eval_recall": 0.8669275929549902,
      "eval_runtime": 25.8902,
      "eval_samples_per_second": 39.474,
      "eval_steps_per_second": 1.236,
      "step": 384
    },
    {
      "epoch": 5.0,
      "grad_norm": 2.8876335620880127,
      "learning_rate": 6.767503692662709e-05,
      "loss": 0.4125,
      "step": 480
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.821917808219178,
      "eval_f1": 0.8372093023255814,
      "eval_loss": 0.41102567315101624,
      "eval_precision": 0.771004942339374,
      "eval_recall": 0.9158512720156555,
      "eval_runtime": 26.546,
      "eval_samples_per_second": 38.499,
      "eval_steps_per_second": 1.205,
      "step": 480
    },
    {
      "epoch": 6.0,
      "grad_norm": 3.6481876373291016,
      "learning_rate": 5.075627769497032e-05,
      "loss": 0.4037,
      "step": 576
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.8199608610567515,
      "eval_f1": 0.8371681415929204,
      "eval_loss": 0.4179791808128357,
      "eval_precision": 0.7641357027463651,
      "eval_recall": 0.9256360078277887,
      "eval_runtime": 25.6553,
      "eval_samples_per_second": 39.836,
      "eval_steps_per_second": 1.247,
      "step": 576
    },
    {
      "epoch": 7.0,
      "grad_norm": 4.620311260223389,
      "learning_rate": 3.383751846331354e-05,
      "loss": 0.3929,
      "step": 672
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.8268101761252447,
      "eval_f1": 0.8426666666666667,
      "eval_loss": 0.4127921462059021,
      "eval_precision": 0.7719869706840391,
      "eval_recall": 0.9275929549902152,
      "eval_runtime": 26.1034,
      "eval_samples_per_second": 39.152,
      "eval_steps_per_second": 1.226,
      "step": 672
    },
    {
      "epoch": 8.0,
      "grad_norm": 4.286402225494385,
      "learning_rate": 1.691875923165677e-05,
      "loss": 0.3895,
      "step": 768
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.8277886497064579,
      "eval_f1": 0.842857142857143,
      "eval_loss": 0.4042445123195648,
      "eval_precision": 0.7750410509031199,
      "eval_recall": 0.923679060665362,
      "eval_runtime": 26.1296,
      "eval_samples_per_second": 39.113,
      "eval_steps_per_second": 1.225,
      "step": 768
    },
    {
      "epoch": 9.0,
      "grad_norm": 2.3019790649414062,
      "learning_rate": 0.0,
      "loss": 0.3874,
      "step": 864
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.8307240704500979,
      "eval_f1": 0.843155031731641,
      "eval_loss": 0.4016450047492981,
      "eval_precision": 0.785472972972973,
      "eval_recall": 0.9099804305283757,
      "eval_runtime": 25.5652,
      "eval_samples_per_second": 39.976,
      "eval_steps_per_second": 1.252,
      "step": 864
    }
  ],
  "logging_steps": 500,
  "max_steps": 864,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9,
  "save_steps": 500,
  "total_flos": 2121256775520.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.9725110110091697,
    "learning_rate": 0.00015226883308491095,
    "num_train_epochs": 9,
    "temperature": 30
  }
}
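
Note: this is the trainer_state.json that transformers.Trainer saves alongside each checkpoint; the trial_params block records the values sampled for this hyperparameter-search trial (alpha and temperature are hyperparameters that, in a typical distillation setup, weight the soft-target loss and smooth the teacher logits). A minimal Python sketch for summarizing such a file, assuming a hypothetical local path to the checkpoint directory:

import json
from pathlib import Path

# Hypothetical path; point this at the checkpoint directory that holds the file.
state_path = Path("tiny-bert-sst2-distilled/run-5/checkpoint-864/trainer_state.json")
state = json.loads(state_path.read_text())

print("best checkpoint:", state["best_model_checkpoint"])
print("best metric:    ", round(state["best_metric"], 4))
print("trial params:   ", state["trial_params"])

# log_history mixes training entries (with "loss") and evaluation entries
# (with "eval_*" keys); keep only the evaluation entries for a per-epoch summary.
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(
            f"epoch {entry['epoch']:>4.1f}  "
            f"acc={entry['eval_accuracy']:.4f}  "
            f"f1={entry['eval_f1']:.4f}  "
            f"eval_loss={entry['eval_loss']:.4f}"
        )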