{
  "best_metric": 0.8371559633027523,
  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-34/checkpoint-527",
  "epoch": 7.0,
  "eval_steps": 500,
  "global_step": 3689,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 20.85869789123535,
      "learning_rate": 0.0002713055538909492,
      "loss": 1.5596,
      "step": 527
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8371559633027523,
      "eval_loss": 1.5567338466644287,
      "eval_runtime": 2.8111,
      "eval_samples_per_second": 310.197,
      "eval_steps_per_second": 2.49,
      "step": 527
    },
    {
      "epoch": 2.0,
      "grad_norm": 6.722352027893066,
      "learning_rate": 0.00022608796157579106,
      "loss": 0.6915,
      "step": 1054
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.819954128440367,
      "eval_loss": 1.7321298122406006,
      "eval_runtime": 2.823,
      "eval_samples_per_second": 308.894,
      "eval_steps_per_second": 2.48,
      "step": 1054
    },
    {
      "epoch": 3.0,
      "grad_norm": NaN,
      "learning_rate": 0.0001809561711435838,
      "loss": 0.4888,
      "step": 1581
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.819954128440367,
      "eval_loss": 1.7588660717010498,
      "eval_runtime": 2.8219,
      "eval_samples_per_second": 309.007,
      "eval_steps_per_second": 2.481,
      "step": 1581
    },
    {
      "epoch": 4.0,
      "grad_norm": 5.12546443939209,
      "learning_rate": 0.0001357385788284256,
      "loss": 0.3746,
      "step": 2108
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8279816513761468,
      "eval_loss": 1.7675563097000122,
      "eval_runtime": 2.821,
      "eval_samples_per_second": 309.105,
      "eval_steps_per_second": 2.481,
      "step": 2108
    },
    {
      "epoch": 5.0,
      "grad_norm": 11.028658866882324,
      "learning_rate": 9.052098651326739e-05,
      "loss": 0.2954,
      "step": 2635
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8211009174311926,
      "eval_loss": 1.831598162651062,
      "eval_runtime": 2.8049,
      "eval_samples_per_second": 310.88,
      "eval_steps_per_second": 2.496,
      "step": 2635
    },
    {
      "epoch": 6.0,
      "grad_norm": 19.683809280395508,
      "learning_rate": 4.5303394198109176e-05,
      "loss": 0.2513,
      "step": 3162
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.8222477064220184,
      "eval_loss": 1.8130003213882446,
      "eval_runtime": 2.8237,
      "eval_samples_per_second": 308.816,
      "eval_steps_per_second": 2.479,
      "step": 3162
    },
    {
      "epoch": 7.0,
      "grad_norm": 4.766151428222656,
      "learning_rate": 8.580188295096434e-08,
      "loss": 0.2259,
      "step": 3689
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.8165137614678899,
      "eval_loss": 1.8347537517547607,
      "eval_runtime": 2.8047,
      "eval_samples_per_second": 310.903,
      "eval_steps_per_second": 2.496,
      "step": 3689
    }
  ],
  "logging_steps": 500,
  "max_steps": 3689,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 500,
  "total_flos": 56628612372420.0,
  "train_batch_size": 128,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.2366804151203985,
    "learning_rate": 0.0003165231462061075,
    "num_train_epochs": 7,
    "temperature": 22
  }
}