{ "best_metric": 1.3362245559692383, "best_model_checkpoint": "miner_id_24/checkpoint-25", "epoch": 0.002224298233907202, "eval_steps": 5, "global_step": 25, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 8.897192935628809e-05, "grad_norm": 2.632169008255005, "learning_rate": 2e-05, "loss": 11.6821, "step": 1 }, { "epoch": 8.897192935628809e-05, "eval_loss": 2.557339668273926, "eval_runtime": 651.5004, "eval_samples_per_second": 7.265, "eval_steps_per_second": 3.633, "step": 1 }, { "epoch": 0.00017794385871257618, "grad_norm": 3.087557792663574, "learning_rate": 4e-05, "loss": 9.6414, "step": 2 }, { "epoch": 0.0002669157880688643, "grad_norm": 3.1473586559295654, "learning_rate": 6e-05, "loss": 11.2277, "step": 3 }, { "epoch": 0.00035588771742515237, "grad_norm": 2.937967300415039, "learning_rate": 8e-05, "loss": 9.7278, "step": 4 }, { "epoch": 0.00044485964678144045, "grad_norm": 3.6837217807769775, "learning_rate": 0.0001, "loss": 11.0646, "step": 5 }, { "epoch": 0.00044485964678144045, "eval_loss": 2.399348258972168, "eval_runtime": 647.6116, "eval_samples_per_second": 7.308, "eval_steps_per_second": 3.655, "step": 5 }, { "epoch": 0.0005338315761377286, "grad_norm": 4.098256587982178, "learning_rate": 0.00012, "loss": 9.5806, "step": 6 }, { "epoch": 0.0006228035054940167, "grad_norm": 4.853851318359375, "learning_rate": 0.00014, "loss": 9.2953, "step": 7 }, { "epoch": 0.0007117754348503047, "grad_norm": 6.96266508102417, "learning_rate": 0.00016, "loss": 7.8354, "step": 8 }, { "epoch": 0.0008007473642065928, "grad_norm": 5.600890159606934, "learning_rate": 0.00018, "loss": 6.4035, "step": 9 }, { "epoch": 0.0008897192935628809, "grad_norm": 4.275112152099609, "learning_rate": 0.0002, "loss": 6.6919, "step": 10 }, { "epoch": 0.0008897192935628809, "eval_loss": 1.5063281059265137, "eval_runtime": 647.5218, "eval_samples_per_second": 7.309, "eval_steps_per_second": 3.655, "step": 10 }, { "epoch": 0.000978691222919169, "grad_norm": 6.842349529266357, "learning_rate": 0.00019781476007338058, "loss": 5.7354, "step": 11 }, { "epoch": 0.0010676631522754572, "grad_norm": 7.970447063446045, "learning_rate": 0.0001913545457642601, "loss": 5.5847, "step": 12 }, { "epoch": 0.0011566350816317452, "grad_norm": 6.706606864929199, "learning_rate": 0.00018090169943749476, "loss": 4.7859, "step": 13 }, { "epoch": 0.0012456070109880333, "grad_norm": 10.090574264526367, "learning_rate": 0.00016691306063588583, "loss": 6.1743, "step": 14 }, { "epoch": 0.0013345789403443214, "grad_norm": 6.294565200805664, "learning_rate": 0.00015000000000000001, "loss": 5.0437, "step": 15 }, { "epoch": 0.0013345789403443214, "eval_loss": 1.3769153356552124, "eval_runtime": 643.8141, "eval_samples_per_second": 7.352, "eval_steps_per_second": 3.677, "step": 15 }, { "epoch": 0.0014235508697006095, "grad_norm": 11.344293594360352, "learning_rate": 0.00013090169943749476, "loss": 6.4785, "step": 16 }, { "epoch": 0.0015125227990568976, "grad_norm": 7.5362138748168945, "learning_rate": 0.00011045284632676536, "loss": 4.5365, "step": 17 }, { "epoch": 0.0016014947284131856, "grad_norm": 9.856901168823242, "learning_rate": 8.954715367323468e-05, "loss": 6.8501, "step": 18 }, { "epoch": 0.0016904666577694737, "grad_norm": 9.563337326049805, "learning_rate": 6.909830056250527e-05, "loss": 5.8593, "step": 19 }, { "epoch": 0.0017794385871257618, "grad_norm": 7.136096000671387, "learning_rate": 5.000000000000002e-05, "loss": 4.8991, "step": 20 }, { "epoch": 
0.0017794385871257618, "eval_loss": 1.344485878944397, "eval_runtime": 651.4638, "eval_samples_per_second": 7.265, "eval_steps_per_second": 3.633, "step": 20 }, { "epoch": 0.0018684105164820499, "grad_norm": 6.953361988067627, "learning_rate": 3.308693936411421e-05, "loss": 4.7495, "step": 21 }, { "epoch": 0.001957382445838338, "grad_norm": 10.12816333770752, "learning_rate": 1.9098300562505266e-05, "loss": 5.2026, "step": 22 }, { "epoch": 0.002046354375194626, "grad_norm": 7.440849304199219, "learning_rate": 8.645454235739903e-06, "loss": 5.1348, "step": 23 }, { "epoch": 0.0021353263045509143, "grad_norm": 7.2502264976501465, "learning_rate": 2.1852399266194314e-06, "loss": 4.5911, "step": 24 }, { "epoch": 0.002224298233907202, "grad_norm": 8.127649307250977, "learning_rate": 0.0, "loss": 5.734, "step": 25 }, { "epoch": 0.002224298233907202, "eval_loss": 1.3362245559692383, "eval_runtime": 652.7271, "eval_samples_per_second": 7.251, "eval_steps_per_second": 3.626, "step": 25 } ], "logging_steps": 1, "max_steps": 25, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 10, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 2, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 5305553599856640.0, "train_batch_size": 2, "trial_name": null, "trial_params": null }