{ "best_metric": 0.6, "best_model_checkpoint": "tiny-bert-sst2-distilled/run-3/checkpoint-594", "epoch": 2.0, "eval_steps": 500, "global_step": 594, "is_hyper_param_search": true, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.0, "grad_norm": 0.9984288811683655, "learning_rate": 9.995670829688553e-05, "loss": 0.5706, "step": 297 }, { "epoch": 1.0, "eval_accuracy": 0.5128712871287129, "eval_loss": 0.5624967217445374, "eval_runtime": 13.6049, "eval_samples_per_second": 37.119, "eval_steps_per_second": 1.176, "step": 297 }, { "epoch": 2.0, "grad_norm": 0.9848873615264893, "learning_rate": 4.9978354148442764e-05, "loss": 0.5602, "step": 594 }, { "epoch": 2.0, "eval_accuracy": 0.6, "eval_loss": 0.5566068291664124, "eval_runtime": 13.9891, "eval_samples_per_second": 36.099, "eval_steps_per_second": 1.144, "step": 594 } ], "logging_steps": 500, "max_steps": 891, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 500, "total_flos": 1555686566280.0, "train_batch_size": 32, "trial_name": null, "trial_params": { "alpha": 0.7785816803005383, "learning_rate": 0.0001499350624453283, "num_train_epochs": 3, "temperature": 16 } }