{
  "best_metric": 0.8371559633027523,
  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-15/checkpoint-527",
  "epoch": 7.0,
  "eval_steps": 500,
  "global_step": 3689,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 20.894563674926758,
      "learning_rate": 0.00025961167449966676,
      "loss": 1.6678,
      "step": 527
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8371559633027523,
      "eval_loss": 1.635141134262085,
      "eval_runtime": 2.8174,
      "eval_samples_per_second": 309.502,
      "eval_steps_per_second": 2.485,
      "step": 527
    },
    {
      "epoch": 2.0,
      "grad_norm": 7.817446708679199,
      "learning_rate": 0.00021634306208305565,
      "loss": 0.738,
      "step": 1054
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8188073394495413,
      "eval_loss": 1.8634682893753052,
      "eval_runtime": 2.8123,
      "eval_samples_per_second": 310.062,
      "eval_steps_per_second": 2.489,
      "step": 1054
    },
    {
      "epoch": 3.0,
      "grad_norm": NaN,
      "learning_rate": 0.00017315655329531854,
      "loss": 0.5214,
      "step": 1581
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8222477064220184,
      "eval_loss": 1.81102454662323,
      "eval_runtime": 2.8135,
      "eval_samples_per_second": 309.932,
      "eval_steps_per_second": 2.488,
      "step": 1581
    },
    {
      "epoch": 4.0,
      "grad_norm": 6.225761890411377,
      "learning_rate": 0.0001298879408787074,
      "loss": 0.4003,
      "step": 2108
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8302752293577982,
      "eval_loss": 1.878706932067871,
      "eval_runtime": 2.8102,
      "eval_samples_per_second": 310.296,
      "eval_steps_per_second": 2.491,
      "step": 2108
    },
    {
      "epoch": 5.0,
      "grad_norm": 12.372098922729492,
      "learning_rate": 8.661932846209629e-05,
      "loss": 0.32,
      "step": 2635
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8256880733944955,
      "eval_loss": 1.8621728420257568,
      "eval_runtime": 2.8187,
      "eval_samples_per_second": 309.357,
      "eval_steps_per_second": 2.483,
      "step": 2635
    },
    {
      "epoch": 6.0,
      "grad_norm": 20.063739776611328,
      "learning_rate": 4.3350716045485156e-05,
      "loss": 0.2702,
      "step": 3162
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.823394495412844,
      "eval_loss": 1.8646079301834106,
      "eval_runtime": 2.813,
      "eval_samples_per_second": 309.994,
      "eval_steps_per_second": 2.488,
      "step": 3162
    },
    {
      "epoch": 7.0,
      "grad_norm": 8.405074119567871,
      "learning_rate": 8.21036288740249e-08,
      "loss": 0.2411,
      "step": 3689
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.8268348623853211,
      "eval_loss": 1.880131483078003,
      "eval_runtime": 2.8069,
      "eval_samples_per_second": 310.665,
      "eval_steps_per_second": 2.494,
      "step": 3689
    }
  ],
  "logging_steps": 500,
  "max_steps": 3689,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 500,
  "total_flos": 56628612372420.0,
  "train_batch_size": 128,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.18003409042088625,
    "learning_rate": 0.0003028802869162779,
    "num_train_epochs": 7,
    "temperature": 27
  }
}