{ "best_metric": 0.6674509803921569, "best_model_checkpoint": "tiny-bert-sst2-distilled/run-5/checkpoint-320", "epoch": 4.0, "eval_steps": 500, "global_step": 640, "is_hyper_param_search": true, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.0, "grad_norm": 0.9986019134521484, "learning_rate": 6.067732661916699e-05, "loss": 0.3474, "step": 160 }, { "epoch": 1.0, "eval_accuracy": 0.6666666666666666, "eval_f1": 0.0, "eval_loss": 0.33498698472976685, "eval_mcc": 0.0, "eval_precision": 0.0, "eval_recall": 0.0, "eval_runtime": 1.8603, "eval_samples_per_second": 685.372, "eval_steps_per_second": 21.502, "step": 160 }, { "epoch": 2.0, "grad_norm": 0.9157262444496155, "learning_rate": 4.045155107944466e-05, "loss": 0.3312, "step": 320 }, { "epoch": 2.0, "eval_accuracy": 0.6674509803921569, "eval_f1": 0.004694835680751173, "eval_loss": 0.3251466453075409, "eval_mcc": 0.03962144258751637, "eval_precision": 1.0, "eval_recall": 0.002352941176470588, "eval_runtime": 1.8638, "eval_samples_per_second": 684.076, "eval_steps_per_second": 21.461, "step": 320 }, { "epoch": 3.0, "grad_norm": 1.6788557767868042, "learning_rate": 2.022577553972233e-05, "loss": 0.3247, "step": 480 }, { "epoch": 3.0, "eval_accuracy": 0.6666666666666666, "eval_f1": 0.049217002237136466, "eval_loss": 0.32314401865005493, "eval_mcc": 0.046847973719895464, "eval_precision": 0.5, "eval_recall": 0.02588235294117647, "eval_runtime": 1.8829, "eval_samples_per_second": 677.158, "eval_steps_per_second": 21.244, "step": 480 }, { "epoch": 4.0, "grad_norm": 0.6379467844963074, "learning_rate": 0.0, "loss": 0.3227, "step": 640 }, { "epoch": 4.0, "eval_accuracy": 0.6666666666666666, "eval_f1": 0.018475750577367205, "eval_loss": 0.3209414780139923, "eval_mcc": 0.028093878027715367, "eval_precision": 0.5, "eval_recall": 0.009411764705882352, "eval_runtime": 1.8827, "eval_samples_per_second": 677.207, "eval_steps_per_second": 21.246, "step": 640 } ], "logging_steps": 500, "max_steps": 640, "num_input_tokens_seen": 0, "num_train_epochs": 4, "save_steps": 500, "total_flos": 1167021751680.0, "train_batch_size": 32, "trial_name": null, "trial_params": { "alpha": 0.5122281577891578, "learning_rate": 8.090310215888932e-05, "num_train_epochs": 4, "temperature": 14 } }