{ "best_metric": 0.8160469667318982, "best_model_checkpoint": "tiny-bert-sst2-distilled/run-13/checkpoint-192", "epoch": 4.0, "eval_steps": 500, "global_step": 192, "is_hyper_param_search": true, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.0, "grad_norm": 1.1932880878448486, "learning_rate": 0.00031905315423317427, "loss": 0.5324, "step": 48 }, { "epoch": 1.0, "eval_accuracy": 0.7514677103718199, "eval_f1": 0.7897350993377483, "eval_loss": 0.4841874837875366, "eval_precision": 0.6843615494978479, "eval_recall": 0.9334637964774951, "eval_runtime": 27.3845, "eval_samples_per_second": 37.32, "eval_steps_per_second": 0.584, "step": 48 }, { "epoch": 2.0, "grad_norm": 5.759732723236084, "learning_rate": 0.0002552425233865394, "loss": 0.4624, "step": 96 }, { "epoch": 2.0, "eval_accuracy": 0.7720156555772995, "eval_f1": 0.762487257900102, "eval_loss": 0.43731480836868286, "eval_precision": 0.7957446808510639, "eval_recall": 0.7318982387475538, "eval_runtime": 27.3758, "eval_samples_per_second": 37.332, "eval_steps_per_second": 0.584, "step": 96 }, { "epoch": 3.0, "grad_norm": 1.5165303945541382, "learning_rate": 0.00019143189253990455, "loss": 0.4346, "step": 144 }, { "epoch": 3.0, "eval_accuracy": 0.8091976516634051, "eval_f1": 0.8133971291866028, "eval_loss": 0.4113065004348755, "eval_precision": 0.795880149812734, "eval_recall": 0.8317025440313112, "eval_runtime": 26.8327, "eval_samples_per_second": 38.088, "eval_steps_per_second": 0.596, "step": 144 }, { "epoch": 4.0, "grad_norm": 2.3469491004943848, "learning_rate": 0.0001276212616932697, "loss": 0.4143, "step": 192 }, { "epoch": 4.0, "eval_accuracy": 0.8160469667318982, "eval_f1": 0.821969696969697, "eval_loss": 0.39889204502105713, "eval_precision": 0.7963302752293578, "eval_recall": 0.8493150684931506, "eval_runtime": 27.0864, "eval_samples_per_second": 37.731, "eval_steps_per_second": 0.591, "step": 192 } ], "logging_steps": 500, "max_steps": 288, "num_input_tokens_seen": 0, "num_train_epochs": 6, "save_steps": 500, "total_flos": 942780789120.0, "train_batch_size": 64, "trial_name": null, "trial_params": { "alpha": 0.804266474524187, "learning_rate": 0.0003828637850798091, "num_train_epochs": 6, "temperature": 12 } }