|
{
  "best_metric": 0.8209393346379648,
  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-12/checkpoint-768",
  "epoch": 7.0,
  "eval_steps": 500,
  "global_step": 1344,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 0.9969735741615295,
      "learning_rate": 0.0003519988357465585,
      "loss": 0.4838,
      "step": 192
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7915851272015656,
      "eval_f1": 0.8149435273675064,
      "eval_loss": 0.4351368844509125,
      "eval_mcc": 0.6026908465743466,
      "eval_precision": 0.7328125,
      "eval_recall": 0.9178082191780822,
      "eval_runtime": 66.7179,
      "eval_samples_per_second": 15.318,
      "eval_steps_per_second": 0.48,
      "step": 192
    },
    {
      "epoch": 2.0,
      "grad_norm": 9.071135520935059,
      "learning_rate": 0.0002933323631221321,
      "loss": 0.435,
      "step": 384
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7954990215264188,
      "eval_f1": 0.7956989247311829,
      "eval_loss": 0.42000794410705566,
      "eval_mcc": 0.5909991747116098,
      "eval_precision": 0.794921875,
      "eval_recall": 0.7964774951076321,
      "eval_runtime": 66.4436,
      "eval_samples_per_second": 15.381,
      "eval_steps_per_second": 0.482,
      "step": 384
    },
    {
      "epoch": 3.0,
      "grad_norm": 3.5558242797851562,
      "learning_rate": 0.00023466589049770566,
      "loss": 0.4107,
      "step": 576
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8101761252446184,
      "eval_f1": 0.8255395683453237,
      "eval_loss": 0.4215203523635864,
      "eval_mcc": 0.6302037430382941,
      "eval_precision": 0.7637271214642263,
      "eval_recall": 0.898238747553816,
      "eval_runtime": 66.249,
      "eval_samples_per_second": 15.427,
      "eval_steps_per_second": 0.483,
      "step": 576
    },
    {
      "epoch": 4.0,
      "grad_norm": 3.956291437149048,
      "learning_rate": 0.00017599941787327925,
      "loss": 0.4095,
      "step": 768
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8209393346379648,
      "eval_f1": 0.8271954674220963,
      "eval_loss": 0.4081785976886749,
      "eval_mcc": 0.6435679288765915,
      "eval_precision": 0.7992700729927007,
      "eval_recall": 0.8571428571428571,
      "eval_runtime": 66.2882,
      "eval_samples_per_second": 15.418,
      "eval_steps_per_second": 0.483,
      "step": 768
    },
    {
      "epoch": 5.0,
      "grad_norm": 5.7001824378967285,
      "learning_rate": 0.00011733294524885283,
      "loss": 0.3959,
      "step": 960
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8170254403131115,
      "eval_f1": 0.826046511627907,
      "eval_loss": 0.41252297163009644,
      "eval_mcc": 0.6374890350533482,
      "eval_precision": 0.7872340425531915,
      "eval_recall": 0.8688845401174168,
      "eval_runtime": 66.4475,
      "eval_samples_per_second": 15.381,
      "eval_steps_per_second": 0.482,
      "step": 960
    },
    {
      "epoch": 6.0,
      "grad_norm": 1.9468530416488647,
      "learning_rate": 5.8666472624426415e-05,
      "loss": 0.3889,
      "step": 1152
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.8160469667318982,
      "eval_f1": 0.8324420677361855,
      "eval_loss": 0.40997278690338135,
      "eval_mcc": 0.6445565206113043,
      "eval_precision": 0.7643207855973814,
      "eval_recall": 0.913894324853229,
      "eval_runtime": 66.9893,
      "eval_samples_per_second": 15.256,
      "eval_steps_per_second": 0.478,
      "step": 1152
    },
    {
      "epoch": 7.0,
      "grad_norm": 1.8196734189987183,
      "learning_rate": 0.0,
      "loss": 0.3804,
      "step": 1344
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.8209393346379648,
      "eval_f1": 0.8297674418604651,
      "eval_loss": 0.405351847410202,
      "eval_mcc": 0.6453592700540068,
      "eval_precision": 0.7907801418439716,
      "eval_recall": 0.87279843444227,
      "eval_runtime": 66.7109,
      "eval_samples_per_second": 15.32,
      "eval_steps_per_second": 0.48,
      "step": 1344
    }
  ],
  "logging_steps": 500,
  "max_steps": 1344,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 500,
  "total_flos": 1649866380960.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.7654129557516562,
    "learning_rate": 0.00041066530837098494,
    "num_train_epochs": 7,
    "per_device_train_batch_size": 16,
    "temperature": 40
  }
}