{ "best_metric": 0.5089108910891089, "best_model_checkpoint": "tiny-bert-sst2-distilled/run-2/checkpoint-297", "epoch": 3.0, "eval_steps": 500, "global_step": 891, "is_hyper_param_search": true, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.0, "grad_norm": 0.6225994825363159, "learning_rate": 0.0001586738890732749, "loss": 0.0237, "step": 297 }, { "epoch": 1.0, "eval_accuracy": 0.5089108910891089, "eval_loss": 0.015105457045137882, "eval_runtime": 57.753, "eval_samples_per_second": 8.744, "eval_steps_per_second": 0.277, "step": 297 }, { "epoch": 2.0, "grad_norm": 0.24433593451976776, "learning_rate": 7.933694453663745e-05, "loss": 0.0161, "step": 594 }, { "epoch": 2.0, "eval_accuracy": 0.504950495049505, "eval_loss": 0.014346625655889511, "eval_runtime": 56.8115, "eval_samples_per_second": 8.889, "eval_steps_per_second": 0.282, "step": 594 }, { "epoch": 3.0, "grad_norm": 0.19921059906482697, "learning_rate": 0.0, "loss": 0.0151, "step": 891 }, { "epoch": 3.0, "eval_accuracy": 0.504950495049505, "eval_loss": 0.013619398698210716, "eval_runtime": 56.0103, "eval_samples_per_second": 9.016, "eval_steps_per_second": 0.286, "step": 891 } ], "logging_steps": 500, "max_steps": 891, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 500, "total_flos": 2333529849420.0, "train_batch_size": 32, "trial_name": null, "trial_params": { "alpha": 0.007640219223822298, "learning_rate": 0.00023801083360991236, "num_train_epochs": 3, "temperature": 6 } }