{ "best_metric": 0.8111545988258317, "best_model_checkpoint": "tiny-bert-sst2-distilled/run-10/checkpoint-192", "epoch": 4.0, "eval_steps": 500, "global_step": 256, "is_hyper_param_search": true, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.0, "grad_norm": 7.969868183135986, "learning_rate": 0.0006324213595544264, "loss": 0.5787, "step": 64 }, { "epoch": 1.0, "eval_accuracy": 0.773972602739726, "eval_f1": 0.8070175438596492, "eval_loss": 0.48913490772247314, "eval_precision": 0.7040816326530612, "eval_recall": 0.9452054794520548, "eval_runtime": 27.7283, "eval_samples_per_second": 36.858, "eval_steps_per_second": 1.154, "step": 64 }, { "epoch": 2.0, "grad_norm": 2.9250471591949463, "learning_rate": 0.00047431601966581977, "loss": 0.46, "step": 128 }, { "epoch": 2.0, "eval_accuracy": 0.799412915851272, "eval_f1": 0.8138056312443234, "eval_loss": 0.4400319457054138, "eval_precision": 0.7593220338983051, "eval_recall": 0.8767123287671232, "eval_runtime": 28.2626, "eval_samples_per_second": 36.161, "eval_steps_per_second": 1.132, "step": 128 }, { "epoch": 3.0, "grad_norm": 3.8848695755004883, "learning_rate": 0.0003162106797772132, "loss": 0.4251, "step": 192 }, { "epoch": 3.0, "eval_accuracy": 0.8111545988258317, "eval_f1": 0.8170616113744076, "eval_loss": 0.4429156482219696, "eval_precision": 0.7922794117647058, "eval_recall": 0.8434442270058709, "eval_runtime": 29.3102, "eval_samples_per_second": 34.868, "eval_steps_per_second": 1.092, "step": 192 }, { "epoch": 4.0, "grad_norm": 6.6206135749816895, "learning_rate": 0.0001581053398886066, "loss": 0.4016, "step": 256 }, { "epoch": 4.0, "eval_accuracy": 0.8023483365949119, "eval_f1": 0.8027343750000001, "eval_loss": 0.42793938517570496, "eval_precision": 0.8011695906432749, "eval_recall": 0.8043052837573386, "eval_runtime": 28.4384, "eval_samples_per_second": 35.937, "eval_steps_per_second": 1.125, "step": 256 } ], "logging_steps": 500, "max_steps": 320, "num_input_tokens_seen": 0, "num_train_epochs": 5, "save_steps": 500, "total_flos": 942780789120.0, "train_batch_size": 48, "trial_name": null, "trial_params": { "alpha": 0.9990297556295253, "learning_rate": 0.000790526699443033, "num_train_epochs": 5, "per_device_train_batch_size": 48, "temperature": 30 } }