{ "best_metric": 0.7710371819960861, "best_model_checkpoint": "tiny-bert-sst2-distilled/run-10/checkpoint-3064", "epoch": 4.0, "eval_steps": 500, "global_step": 3064, "is_hyper_param_search": true, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.0, "grad_norm": 2.387345790863037, "learning_rate": 9.274722577647405e-06, "loss": 0.6719, "step": 766 }, { "epoch": 1.0, "eval_accuracy": 0.6594911937377691, "eval_f1": 0.5786924939467312, "eval_loss": 0.6500130891799927, "eval_precision": 0.7587301587301587, "eval_recall": 0.46771037181996084, "eval_runtime": 133.0073, "eval_samples_per_second": 7.684, "eval_steps_per_second": 1.925, "step": 766 }, { "epoch": 2.0, "grad_norm": 11.753175735473633, "learning_rate": 6.956041933235553e-06, "loss": 0.6052, "step": 1532 }, { "epoch": 2.0, "eval_accuracy": 0.7250489236790607, "eval_f1": 0.7236971484759096, "eval_loss": 0.5506593585014343, "eval_precision": 0.7272727272727273, "eval_recall": 0.7201565557729941, "eval_runtime": 131.7286, "eval_samples_per_second": 7.758, "eval_steps_per_second": 1.943, "step": 1532 }, { "epoch": 3.0, "grad_norm": 16.92954444885254, "learning_rate": 4.637361288823702e-06, "loss": 0.5438, "step": 2298 }, { "epoch": 3.0, "eval_accuracy": 0.7583170254403131, "eval_f1": 0.7776777677767776, "eval_loss": 0.5147304534912109, "eval_precision": 0.72, "eval_recall": 0.8454011741682974, "eval_runtime": 132.3477, "eval_samples_per_second": 7.722, "eval_steps_per_second": 1.934, "step": 2298 }, { "epoch": 4.0, "grad_norm": 12.114532470703125, "learning_rate": 2.318680644411851e-06, "loss": 0.523, "step": 3064 }, { "epoch": 4.0, "eval_accuracy": 0.7710371819960861, "eval_f1": 0.7975778546712803, "eval_loss": 0.5050157904624939, "eval_precision": 0.7147286821705426, "eval_recall": 0.9021526418786693, "eval_runtime": 130.8317, "eval_samples_per_second": 7.812, "eval_steps_per_second": 1.957, "step": 3064 } ], "logging_steps": 500, "max_steps": 3830, "num_input_tokens_seen": 0, "num_train_epochs": 5, "save_steps": 500, "total_flos": 942780789120.0, "train_batch_size": 4, "trial_name": null, "trial_params": { "alpha": 0.9677948459483069, "learning_rate": 1.1593403222059255e-05, "num_train_epochs": 5, "temperature": 2 } }