{
  "best_metric": 0.7568627450980392,
  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-25/checkpoint-800",
  "epoch": 6.0,
  "eval_steps": 500,
  "global_step": 960,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 1.9096550941467285,
      "learning_rate": 0.00038222682011154285,
      "loss": 0.5297,
      "step": 160
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6666666666666666,
      "eval_f1": 0.0,
      "eval_loss": 0.503944993019104,
      "eval_mcc": 0.0,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 1.8592,
      "eval_samples_per_second": 685.796,
      "eval_steps_per_second": 21.515,
      "step": 160
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.1312909126281738,
      "learning_rate": 0.0003185223500929524,
      "loss": 0.496,
      "step": 320
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7396078431372549,
      "eval_f1": 0.5388888888888889,
      "eval_loss": 0.4670221507549286,
      "eval_mcc": 0.3774355933383126,
      "eval_precision": 0.6576271186440678,
      "eval_recall": 0.45647058823529413,
      "eval_runtime": 2.191,
      "eval_samples_per_second": 581.93,
      "eval_steps_per_second": 18.257,
      "step": 320
    },
    {
      "epoch": 3.0,
      "grad_norm": 2.6883387565612793,
      "learning_rate": 0.0002548178800743619,
      "loss": 0.4793,
      "step": 480
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.7388235294117647,
      "eval_f1": 0.5138686131386861,
      "eval_loss": 0.4671221673488617,
      "eval_mcc": 0.36889273398279,
      "eval_precision": 0.676923076923077,
      "eval_recall": 0.41411764705882353,
      "eval_runtime": 1.8737,
      "eval_samples_per_second": 680.471,
      "eval_steps_per_second": 21.348,
      "step": 480
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.2410541772842407,
      "learning_rate": 0.00019111341005577143,
      "loss": 0.4728,
      "step": 640
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7270588235294118,
      "eval_f1": 0.42,
      "eval_loss": 0.4601285755634308,
      "eval_mcc": 0.3271641233943044,
      "eval_precision": 0.72,
      "eval_recall": 0.2964705882352941,
      "eval_runtime": 2.208,
      "eval_samples_per_second": 577.447,
      "eval_steps_per_second": 18.116,
      "step": 640
    },
    {
      "epoch": 5.0,
      "grad_norm": 4.100618362426758,
      "learning_rate": 0.00012740894003718096,
      "loss": 0.4617,
      "step": 800
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.7568627450980392,
      "eval_f1": 0.5730027548209367,
      "eval_loss": 0.45551422238349915,
      "eval_mcc": 0.4218176989662163,
      "eval_precision": 0.6910299003322259,
      "eval_recall": 0.4894117647058824,
      "eval_runtime": 1.8811,
      "eval_samples_per_second": 677.808,
      "eval_steps_per_second": 21.265,
      "step": 800
    },
    {
      "epoch": 6.0,
      "grad_norm": 1.64212965965271,
      "learning_rate": 6.370447001859048e-05,
      "loss": 0.4528,
      "step": 960
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.756078431372549,
      "eval_f1": 0.5674547983310153,
      "eval_loss": 0.45275548100471497,
      "eval_mcc": 0.41870095673277924,
      "eval_precision": 0.6938775510204082,
      "eval_recall": 0.48,
      "eval_runtime": 1.9208,
      "eval_samples_per_second": 663.796,
      "eval_steps_per_second": 20.825,
      "step": 960
    }
  ],
  "logging_steps": 500,
  "max_steps": 1120,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 500,
  "total_flos": 1750532627520.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.8351933279450521,
    "learning_rate": 0.00044593129013013335,
    "num_train_epochs": 7,
    "temperature": 12
  }
}