|
{
  "best_metric": 0.5762376237623762,
  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-3/checkpoint-1782",
  "epoch": 7.0,
  "eval_steps": 500,
  "global_step": 2079,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 0.8068264722824097,
      "learning_rate": 1.8960352341732743e-05,
      "loss": 0.5426,
      "step": 297
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.500990099009901,
      "eval_loss": 0.5363946557044983,
      "eval_runtime": 55.6857,
      "eval_samples_per_second": 9.069,
      "eval_steps_per_second": 0.287,
      "step": 297
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.7480373978614807,
      "learning_rate": 1.580029361811062e-05,
      "loss": 0.5354,
      "step": 594
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.5564356435643565,
      "eval_loss": 0.5311887264251709,
      "eval_runtime": 55.3283,
      "eval_samples_per_second": 9.127,
      "eval_steps_per_second": 0.289,
      "step": 594
    },
    {
      "epoch": 3.0,
      "grad_norm": 1.3350061178207397,
      "learning_rate": 1.2640234894488496e-05,
      "loss": 0.5312,
      "step": 891
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.5603960396039604,
      "eval_loss": 0.5280594229698181,
      "eval_runtime": 55.2512,
      "eval_samples_per_second": 9.14,
      "eval_steps_per_second": 0.29,
      "step": 891
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.1148613691329956,
      "learning_rate": 9.480176170866372e-06,
      "loss": 0.5287,
      "step": 1188
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.5643564356435643,
      "eval_loss": 0.5284162759780884,
      "eval_runtime": 55.166,
      "eval_samples_per_second": 9.154,
      "eval_steps_per_second": 0.29,
      "step": 1188
    },
    {
      "epoch": 5.0,
      "grad_norm": 1.2209173440933228,
      "learning_rate": 6.320117447244248e-06,
      "loss": 0.5278,
      "step": 1485
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.5683168316831683,
      "eval_loss": 0.5252435803413391,
      "eval_runtime": 55.1513,
      "eval_samples_per_second": 9.157,
      "eval_steps_per_second": 0.29,
      "step": 1485
    },
    {
      "epoch": 6.0,
      "grad_norm": 1.3163679838180542,
      "learning_rate": 3.160058723622124e-06,
      "loss": 0.5264,
      "step": 1782
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.5762376237623762,
      "eval_loss": 0.525602400302887,
      "eval_runtime": 57.497,
      "eval_samples_per_second": 8.783,
      "eval_steps_per_second": 0.278,
      "step": 1782
    },
    {
      "epoch": 7.0,
      "grad_norm": 1.077811360359192,
      "learning_rate": 0.0,
      "loss": 0.5257,
      "step": 2079
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.5702970297029702,
      "eval_loss": 0.524278998374939,
      "eval_runtime": 55.3503,
      "eval_samples_per_second": 9.124,
      "eval_steps_per_second": 0.289,
      "step": 2079
    }
  ],
  "logging_steps": 500,
  "max_steps": 2079,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 500,
  "total_flos": 5444902981980.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.7228353037734928,
    "learning_rate": 2.212041106535487e-05,
    "num_train_epochs": 7,
    "temperature": 7
  }
}
|
|