{
  "best_metric": 0.8371559633027523,
  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-38/checkpoint-527",
  "epoch": 6.0,
  "eval_steps": 500,
  "global_step": 3162,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 6.650190353393555,
      "learning_rate": 0.0003977381702368286,
      "loss": 0.8907,
      "step": 527
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8371559633027523,
      "eval_loss": 1.0675474405288696,
      "eval_runtime": 2.812,
      "eval_samples_per_second": 310.097,
      "eval_steps_per_second": 2.489,
      "step": 527
    },
    {
      "epoch": 2.0,
      "grad_norm": 7.2698974609375,
      "learning_rate": 0.0003181905361894629,
      "loss": 0.4134,
      "step": 1054
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8084862385321101,
      "eval_loss": 1.2476831674575806,
      "eval_runtime": 2.8048,
      "eval_samples_per_second": 310.9,
      "eval_steps_per_second": 2.496,
      "step": 1054
    },
    {
      "epoch": 3.0,
      "grad_norm": 14.755233764648438,
      "learning_rate": 0.00023864290214209717,
      "loss": 0.2802,
      "step": 1581
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8119266055045872,
      "eval_loss": 1.312410831451416,
      "eval_runtime": 2.8236,
      "eval_samples_per_second": 308.82,
      "eval_steps_per_second": 2.479,
      "step": 1581
    },
    {
      "epoch": 4.0,
      "grad_norm": 4.266346454620361,
      "learning_rate": 0.00015909526809473144,
      "loss": 0.208,
      "step": 2108
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8130733944954128,
      "eval_loss": 1.2842535972595215,
      "eval_runtime": 2.8057,
      "eval_samples_per_second": 310.791,
      "eval_steps_per_second": 2.495,
      "step": 2108
    },
    {
      "epoch": 5.0,
      "grad_norm": 4.0564045906066895,
      "learning_rate": 7.969857832449545e-05,
      "loss": 0.1663,
      "step": 2635
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.823394495412844,
      "eval_loss": 1.2945654392242432,
      "eval_runtime": 2.8126,
      "eval_samples_per_second": 310.037,
      "eval_steps_per_second": 2.489,
      "step": 2635
    },
    {
      "epoch": 6.0,
      "grad_norm": 8.322790145874023,
      "learning_rate": 1.5094427712972622e-07,
      "loss": 0.1392,
      "step": 3162
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.8176605504587156,
      "eval_loss": 1.3337725400924683,
      "eval_runtime": 2.8133,
      "eval_samples_per_second": 309.956,
      "eval_steps_per_second": 2.488,
      "step": 3162
    }
  ],
  "logging_steps": 500,
  "max_steps": 3162,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 500,
  "total_flos": 48527917525620.0,
  "train_batch_size": 128,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.63744345806538,
    "learning_rate": 0.00047728580428419435,
    "num_train_epochs": 6,
    "temperature": 29
  }
}