{
"best_metric": 0.8434442270058709,
"best_model_checkpoint": "tiny-bert-sst2-distilled/run-22/checkpoint-864",
"epoch": 9.0,
"eval_steps": 500,
"global_step": 864,
"is_hyper_param_search": true,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.0,
"grad_norm": 2.464465618133545,
"learning_rate": 0.0001794635836369996,
"loss": 0.565,
"step": 96
},
{
"epoch": 1.0,
"eval_accuracy": 0.7788649706457925,
"eval_f1": 0.8078231292517007,
"eval_loss": 0.4691426157951355,
"eval_precision": 0.7142857142857143,
"eval_recall": 0.9295499021526419,
"eval_runtime": 25.0337,
"eval_samples_per_second": 40.825,
"eval_steps_per_second": 1.278,
"step": 96
},
{
"epoch": 2.0,
"grad_norm": 4.053184509277344,
"learning_rate": 0.00015703063568237466,
"loss": 0.4676,
"step": 192
},
{
"epoch": 2.0,
"eval_accuracy": 0.7984344422700587,
"eval_f1": 0.8170515097690941,
"eval_loss": 0.4367915391921997,
"eval_precision": 0.7479674796747967,
"eval_recall": 0.9001956947162426,
"eval_runtime": 25.2941,
"eval_samples_per_second": 40.405,
"eval_steps_per_second": 1.265,
"step": 192
},
{
"epoch": 3.0,
"grad_norm": 2.9524621963500977,
"learning_rate": 0.00013459768772774972,
"loss": 0.4358,
"step": 288
},
{
"epoch": 3.0,
"eval_accuracy": 0.8209393346379648,
"eval_f1": 0.8334849863512284,
"eval_loss": 0.4263817071914673,
"eval_precision": 0.7789115646258503,
"eval_recall": 0.8962818003913894,
"eval_runtime": 24.9367,
"eval_samples_per_second": 40.984,
"eval_steps_per_second": 1.283,
"step": 288
},
{
"epoch": 4.0,
"grad_norm": 6.636496067047119,
"learning_rate": 0.00011216473977312478,
"loss": 0.4174,
"step": 384
},
{
"epoch": 4.0,
"eval_accuracy": 0.8160469667318982,
"eval_f1": 0.8181818181818182,
"eval_loss": 0.41675594449043274,
"eval_precision": 0.8087954110898662,
"eval_recall": 0.8277886497064579,
"eval_runtime": 24.9785,
"eval_samples_per_second": 40.915,
"eval_steps_per_second": 1.281,
"step": 384
},
{
"epoch": 5.0,
"grad_norm": 2.0662448406219482,
"learning_rate": 8.97317918184998e-05,
"loss": 0.4077,
"step": 480
},
{
"epoch": 5.0,
"eval_accuracy": 0.8356164383561644,
"eval_f1": 0.8502673796791443,
"eval_loss": 0.40537795424461365,
"eval_precision": 0.7806873977086743,
"eval_recall": 0.9334637964774951,
"eval_runtime": 25.8814,
"eval_samples_per_second": 39.488,
"eval_steps_per_second": 1.236,
"step": 480
},
{
"epoch": 6.0,
"grad_norm": 4.637610912322998,
"learning_rate": 6.729884386387486e-05,
"loss": 0.4004,
"step": 576
},
{
"epoch": 6.0,
"eval_accuracy": 0.8258317025440313,
"eval_f1": 0.842756183745583,
"eval_loss": 0.40995147824287415,
"eval_precision": 0.7681159420289855,
"eval_recall": 0.9334637964774951,
"eval_runtime": 25.8126,
"eval_samples_per_second": 39.593,
"eval_steps_per_second": 1.24,
"step": 576
},
{
"epoch": 7.0,
"grad_norm": 3.4253664016723633,
"learning_rate": 4.48658959092499e-05,
"loss": 0.391,
"step": 672
},
{
"epoch": 7.0,
"eval_accuracy": 0.8326810176125244,
"eval_f1": 0.8488063660477454,
"eval_loss": 0.40309804677963257,
"eval_precision": 0.7741935483870968,
"eval_recall": 0.9393346379647749,
"eval_runtime": 26.0268,
"eval_samples_per_second": 39.267,
"eval_steps_per_second": 1.23,
"step": 672
},
{
"epoch": 8.0,
"grad_norm": 4.7185893058776855,
"learning_rate": 2.243294795462495e-05,
"loss": 0.3869,
"step": 768
},
{
"epoch": 8.0,
"eval_accuracy": 0.8395303326810176,
"eval_f1": 0.851985559566787,
"eval_loss": 0.39594146609306335,
"eval_precision": 0.7906197654941374,
"eval_recall": 0.923679060665362,
"eval_runtime": 25.1887,
"eval_samples_per_second": 40.574,
"eval_steps_per_second": 1.27,
"step": 768
},
{
"epoch": 9.0,
"grad_norm": 2.016538619995117,
"learning_rate": 0.0,
"loss": 0.3828,
"step": 864
},
{
"epoch": 9.0,
"eval_accuracy": 0.8434442270058709,
"eval_f1": 0.8542805100182149,
"eval_loss": 0.39343148469924927,
"eval_precision": 0.7989778534923339,
"eval_recall": 0.9178082191780822,
"eval_runtime": 25.5945,
"eval_samples_per_second": 39.931,
"eval_steps_per_second": 1.25,
"step": 864
}
],
"logging_steps": 500,
"max_steps": 864,
"num_input_tokens_seen": 0,
"num_train_epochs": 9,
"save_steps": 500,
"total_flos": 2121256775520.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": {
"alpha": 0.8956145079239534,
"learning_rate": 0.00020189653159162458,
"num_train_epochs": 9,
"temperature": 8
}
}
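
The JSON above is the state a Transformers Trainer writes out as trainer_state.json during a hyperparameter-search trial: log_history holds one training record and one evaluation record per epoch, best_metric (0.8434442270058709) matches the epoch-9 eval_accuracy, and trial_params records the sampled distillation hyperparameters (alpha, temperature, learning_rate, num_train_epochs). Below is a minimal Python sketch for inspecting such a file offline; the file path is an assumption and should be changed to wherever this checkpoint is stored locally.

import json

# Assumed local path; adjust to the actual checkpoint directory
# (best_model_checkpoint above names tiny-bert-sst2-distilled/run-22/checkpoint-864).
with open("tiny-bert-sst2-distilled/run-22/checkpoint-864/trainer_state.json") as f:
    state = json.load(f)

# Evaluation entries are the log_history records that carry eval_* keys.
eval_logs = [e for e in state["log_history"] if "eval_accuracy" in e]

for entry in eval_logs:
    print(
        f"epoch {entry['epoch']:>4}: "
        f"accuracy={entry['eval_accuracy']:.4f}  "
        f"f1={entry['eval_f1']:.4f}  "
        f"loss={entry['eval_loss']:.4f}"
    )

# Best epoch according to eval_accuracy, which is what best_metric tracks here.
best = max(eval_logs, key=lambda e: e["eval_accuracy"])
print("best epoch:", best["epoch"], "eval_accuracy:", best["eval_accuracy"])
print("best_model_checkpoint:", state["best_model_checkpoint"])

# Hyperparameters sampled for this trial (recorded under trial_params).
print("trial_params:", state["trial_params"])

Running this against the state shown above would print one line per epoch (epochs 1 through 9) and report epoch 9 as the best, consistent with best_metric and best_model_checkpoint in the file.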