{
  "best_metric": 0.5742574257425742,
  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-2/checkpoint-5346",
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 5940,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 0.7887440323829651,
      "learning_rate": 0.00015586356006012654,
      "loss": 0.3246,
      "step": 594
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.5148514851485149,
      "eval_f1": 0.16382252559726962,
      "eval_loss": 0.3221835792064667,
      "eval_precision": 0.5853658536585366,
      "eval_recall": 0.09523809523809523,
      "eval_runtime": 15.4228,
      "eval_samples_per_second": 32.744,
      "eval_steps_per_second": 1.037,
      "step": 594
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.6338050961494446,
      "learning_rate": 0.00013854538672011249,
      "loss": 0.3178,
      "step": 1188
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.5168316831683168,
      "eval_f1": 0.17006802721088435,
      "eval_loss": 0.31132203340530396,
      "eval_precision": 0.5952380952380952,
      "eval_recall": 0.0992063492063492,
      "eval_runtime": 15.3986,
      "eval_samples_per_second": 32.795,
      "eval_steps_per_second": 1.039,
      "step": 1188
    },
    {
      "epoch": 3.0,
      "grad_norm": 1.1013684272766113,
      "learning_rate": 0.0001212272133800984,
      "loss": 0.3119,
      "step": 1782
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.5207920792079208,
      "eval_f1": 0.1986754966887417,
      "eval_loss": 0.3059851825237274,
      "eval_precision": 0.6,
      "eval_recall": 0.11904761904761904,
      "eval_runtime": 15.2233,
      "eval_samples_per_second": 33.173,
      "eval_steps_per_second": 1.051,
      "step": 1782
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.1249533891677856,
      "learning_rate": 0.00010390904004008435,
      "loss": 0.3068,
      "step": 2376
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.5287128712871287,
      "eval_f1": 0.1793103448275862,
      "eval_loss": 0.3041296601295471,
      "eval_precision": 0.6842105263157895,
      "eval_recall": 0.10317460317460317,
      "eval_runtime": 15.2189,
      "eval_samples_per_second": 33.183,
      "eval_steps_per_second": 1.051,
      "step": 2376
    },
    {
      "epoch": 5.0,
      "grad_norm": 1.003902554512024,
      "learning_rate": 8.65908667000703e-05,
      "loss": 0.3031,
      "step": 2970
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.5465346534653466,
      "eval_f1": 0.23411371237458192,
      "eval_loss": 0.3002361059188843,
      "eval_precision": 0.7446808510638298,
      "eval_recall": 0.1388888888888889,
      "eval_runtime": 15.2406,
      "eval_samples_per_second": 33.135,
      "eval_steps_per_second": 1.05,
      "step": 2970
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.5786848068237305,
      "learning_rate": 6.927269336005624e-05,
      "loss": 0.3004,
      "step": 3564
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.5683168316831683,
      "eval_f1": 0.30128205128205127,
      "eval_loss": 0.2976093292236328,
      "eval_precision": 0.7833333333333333,
      "eval_recall": 0.1865079365079365,
      "eval_runtime": 15.1978,
      "eval_samples_per_second": 33.228,
      "eval_steps_per_second": 1.053,
      "step": 3564
    },
    {
      "epoch": 7.0,
      "grad_norm": 0.880638599395752,
      "learning_rate": 5.1954520020042175e-05,
      "loss": 0.2983,
      "step": 4158
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.5584158415841585,
      "eval_f1": 0.29652996845425866,
      "eval_loss": 0.29786166548728943,
      "eval_precision": 0.7230769230769231,
      "eval_recall": 0.1865079365079365,
      "eval_runtime": 15.1508,
      "eval_samples_per_second": 33.332,
      "eval_steps_per_second": 1.056,
      "step": 4158
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.8452739119529724,
      "learning_rate": 3.463634668002812e-05,
      "loss": 0.296,
      "step": 4752
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.5722772277227722,
      "eval_f1": 0.3076923076923077,
      "eval_loss": 0.29773908853530884,
      "eval_precision": 0.8,
      "eval_recall": 0.19047619047619047,
      "eval_runtime": 15.0116,
      "eval_samples_per_second": 33.641,
      "eval_steps_per_second": 1.066,
      "step": 4752
    },
    {
      "epoch": 9.0,
      "grad_norm": 1.1398597955703735,
      "learning_rate": 1.731817334001406e-05,
      "loss": 0.2955,
      "step": 5346
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.5742574257425742,
      "eval_f1": 0.3260188087774295,
      "eval_loss": 0.2973213791847229,
      "eval_precision": 0.7761194029850746,
      "eval_recall": 0.20634920634920634,
      "eval_runtime": 15.1503,
      "eval_samples_per_second": 33.333,
      "eval_steps_per_second": 1.056,
      "step": 5346
    },
    {
      "epoch": 10.0,
      "grad_norm": 1.1690034866333008,
      "learning_rate": 0.0,
      "loss": 0.2946,
      "step": 5940
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.5742574257425742,
      "eval_f1": 0.3260188087774295,
      "eval_loss": 0.2967524230480194,
      "eval_precision": 0.7761194029850746,
      "eval_recall": 0.20634920634920634,
      "eval_runtime": 15.5549,
      "eval_samples_per_second": 32.466,
      "eval_steps_per_second": 1.029,
      "step": 5940
    }
  ],
  "logging_steps": 500,
  "max_steps": 5940,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 7778432831400.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.39860510957002215,
    "learning_rate": 0.0001731817334001406,
    "num_train_epochs": 10,
    "per_device_train_batch_size": 16,
    "temperature": 4
  }
}