{ "best_metric": 0.8140900195694716, "best_model_checkpoint": "tiny-bert-sst2-distilled/run-1/checkpoint-288", "epoch": 4.0, "eval_steps": 500, "global_step": 384, "is_hyper_param_search": true, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.0, "grad_norm": 2.6617980003356934, "learning_rate": 0.0006635023570917325, "loss": 0.5037, "step": 96 }, { "epoch": 1.0, "eval_accuracy": 0.7661448140900196, "eval_f1": 0.792714657415438, "eval_loss": 0.4430159628391266, "eval_precision": 0.7118380062305296, "eval_recall": 0.8943248532289628, "eval_runtime": 28.6854, "eval_samples_per_second": 35.628, "eval_steps_per_second": 1.116, "step": 96 }, { "epoch": 2.0, "grad_norm": 1.12320077419281, "learning_rate": 0.0004976267678187994, "loss": 0.4391, "step": 192 }, { "epoch": 2.0, "eval_accuracy": 0.8082191780821918, "eval_f1": 0.8227848101265822, "eval_loss": 0.41584911942481995, "eval_precision": 0.7647058823529411, "eval_recall": 0.8904109589041096, "eval_runtime": 28.5215, "eval_samples_per_second": 35.833, "eval_steps_per_second": 1.122, "step": 192 }, { "epoch": 3.0, "grad_norm": 2.946756362915039, "learning_rate": 0.00033175117854586627, "loss": 0.41, "step": 288 }, { "epoch": 3.0, "eval_accuracy": 0.8140900195694716, "eval_f1": 0.8260073260073261, "eval_loss": 0.43016910552978516, "eval_precision": 0.7762478485370051, "eval_recall": 0.8825831702544031, "eval_runtime": 28.1244, "eval_samples_per_second": 36.339, "eval_steps_per_second": 1.138, "step": 288 }, { "epoch": 4.0, "grad_norm": 3.868119478225708, "learning_rate": 0.00016587558927293314, "loss": 0.4041, "step": 384 }, { "epoch": 4.0, "eval_accuracy": 0.8033268101761253, "eval_f1": 0.802747791952895, "eval_loss": 0.4142443835735321, "eval_precision": 0.8051181102362205, "eval_recall": 0.8003913894324853, "eval_runtime": 29.1758, "eval_samples_per_second": 35.029, "eval_steps_per_second": 1.097, "step": 384 } ], "logging_steps": 500, "max_steps": 480, "num_input_tokens_seen": 0, "num_train_epochs": 5, "save_steps": 500, "total_flos": 942780789120.0, "train_batch_size": 32, "trial_name": null, "trial_params": { "alpha": 0.7818460169984094, "learning_rate": 0.0008293779463646656, "num_train_epochs": 5, "temperature": 3 } }