{ "best_metric": 0.6237623762376238, "best_model_checkpoint": "tiny-bert-sst2-distilled/run-4/checkpoint-1782", "epoch": 6.0, "eval_steps": 500, "global_step": 1782, "is_hyper_param_search": true, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.0, "grad_norm": 1.202705979347229, "learning_rate": 6.504485936113838e-05, "loss": 0.5495, "step": 297 }, { "epoch": 1.0, "eval_accuracy": 0.5069306930693069, "eval_f1": 0.10108303249097472, "eval_loss": 0.5468167662620544, "eval_mcc": 0.027838098756040194, "eval_precision": 0.56, "eval_recall": 0.05555555555555555, "eval_runtime": 0.9316, "eval_samples_per_second": 542.063, "eval_steps_per_second": 17.174, "step": 297 }, { "epoch": 2.0, "grad_norm": 0.8953370451927185, "learning_rate": 5.4204049467615325e-05, "loss": 0.54, "step": 594 }, { "epoch": 2.0, "eval_accuracy": 0.594059405940594, "eval_f1": 0.5858585858585857, "eval_loss": 0.5394836068153381, "eval_mcc": 0.18817791261380143, "eval_precision": 0.5967078189300411, "eval_recall": 0.5753968253968254, "eval_runtime": 0.9335, "eval_samples_per_second": 540.967, "eval_steps_per_second": 17.14, "step": 594 }, { "epoch": 3.0, "grad_norm": 1.554856777191162, "learning_rate": 4.336323957409226e-05, "loss": 0.5316, "step": 891 }, { "epoch": 3.0, "eval_accuracy": 0.5861386138613861, "eval_f1": 0.6365217391304347, "eval_loss": 0.5389003157615662, "eval_mcc": 0.17998847484487682, "eval_precision": 0.56656346749226, "eval_recall": 0.7261904761904762, "eval_runtime": 0.9343, "eval_samples_per_second": 540.483, "eval_steps_per_second": 17.124, "step": 891 }, { "epoch": 4.0, "grad_norm": 1.0591225624084473, "learning_rate": 3.252242968056919e-05, "loss": 0.527, "step": 1188 }, { "epoch": 4.0, "eval_accuracy": 0.6118811881188119, "eval_f1": 0.6287878787878789, "eval_loss": 0.5312101244926453, "eval_mcc": 0.22492336097439145, "eval_precision": 0.6014492753623188, "eval_recall": 0.6587301587301587, "eval_runtime": 0.9347, "eval_samples_per_second": 540.304, "eval_steps_per_second": 17.119, "step": 1188 }, { "epoch": 5.0, "grad_norm": 1.7271960973739624, "learning_rate": 2.168161978704613e-05, "loss": 0.5226, "step": 1485 }, { "epoch": 5.0, "eval_accuracy": 0.6138613861386139, "eval_f1": 0.6183953033268101, "eval_loss": 0.5254874229431152, "eval_mcc": 0.22784970249121222, "eval_precision": 0.61003861003861, "eval_recall": 0.626984126984127, "eval_runtime": 0.9248, "eval_samples_per_second": 546.078, "eval_steps_per_second": 17.301, "step": 1485 }, { "epoch": 6.0, "grad_norm": 1.6252127885818481, "learning_rate": 1.0840809893523065e-05, "loss": 0.5188, "step": 1782 }, { "epoch": 6.0, "eval_accuracy": 0.6237623762376238, "eval_f1": 0.6468401486988847, "eval_loss": 0.5257902145385742, "eval_mcc": 0.24999798285978939, "eval_precision": 0.6083916083916084, "eval_recall": 0.6904761904761905, "eval_runtime": 0.9286, "eval_samples_per_second": 543.832, "eval_steps_per_second": 17.23, "step": 1782 } ], "logging_steps": 500, "max_steps": 2079, "num_input_tokens_seen": 0, "num_train_epochs": 7, "save_steps": 500, "total_flos": 4384207595880.0, "train_batch_size": 32, "trial_name": null, "trial_params": { "alpha": 0.7404813991868276, "learning_rate": 7.588566925466145e-05, "num_train_epochs": 7, "temperature": 47 } }