{ "best_metric": 0.824853228962818, "best_model_checkpoint": "tiny-bert-sst2-distilled/run-47/checkpoint-384", "epoch": 4.0, "eval_steps": 500, "global_step": 384, "is_hyper_param_search": true, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.0, "grad_norm": 1.9338581562042236, "learning_rate": 0.0001802406049279757, "loss": 0.5354, "step": 96 }, { "epoch": 1.0, "eval_accuracy": 0.7671232876712328, "eval_f1": 0.8023255813953489, "eval_loss": 0.4617577791213989, "eval_precision": 0.696969696969697, "eval_recall": 0.9452054794520548, "eval_runtime": 29.4455, "eval_samples_per_second": 34.708, "eval_steps_per_second": 1.087, "step": 96 }, { "epoch": 2.0, "grad_norm": 2.99806547164917, "learning_rate": 0.00015771052931197875, "loss": 0.4503, "step": 192 }, { "epoch": 2.0, "eval_accuracy": 0.8043052837573386, "eval_f1": 0.8242530755711774, "eval_loss": 0.42502960562705994, "eval_precision": 0.748006379585327, "eval_recall": 0.9178082191780822, "eval_runtime": 28.5574, "eval_samples_per_second": 35.788, "eval_steps_per_second": 1.121, "step": 192 }, { "epoch": 3.0, "grad_norm": 2.956643581390381, "learning_rate": 0.00013518045369598176, "loss": 0.4243, "step": 288 }, { "epoch": 3.0, "eval_accuracy": 0.8238747553816047, "eval_f1": 0.8351648351648352, "eval_loss": 0.4111813008785248, "eval_precision": 0.7848537005163512, "eval_recall": 0.8923679060665362, "eval_runtime": 27.7197, "eval_samples_per_second": 36.869, "eval_steps_per_second": 1.154, "step": 288 }, { "epoch": 4.0, "grad_norm": 3.8551297187805176, "learning_rate": 0.00011265037807998481, "loss": 0.409, "step": 384 }, { "epoch": 4.0, "eval_accuracy": 0.824853228962818, "eval_f1": 0.8306527909176915, "eval_loss": 0.40299269556999207, "eval_precision": 0.8040293040293041, "eval_recall": 0.8590998043052838, "eval_runtime": 27.9157, "eval_samples_per_second": 36.61, "eval_steps_per_second": 1.146, "step": 384 } ], "logging_steps": 500, "max_steps": 864, "num_input_tokens_seen": 0, "num_train_epochs": 9, "save_steps": 500, "total_flos": 942780789120.0, "train_batch_size": 32, "trial_name": null, "trial_params": { "alpha": 0.8114321139277666, "learning_rate": 0.00020277068054397267, "num_train_epochs": 9, "temperature": 7 } }