{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.04839685420447671, "eval_steps": 5, "global_step": 20, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0024198427102238356, "grad_norm": NaN, "learning_rate": 2e-05, "loss": 0.0, "step": 1 }, { "epoch": 0.0024198427102238356, "eval_loss": NaN, "eval_runtime": 20.0808, "eval_samples_per_second": 8.665, "eval_steps_per_second": 4.332, "step": 1 }, { "epoch": 0.004839685420447671, "grad_norm": NaN, "learning_rate": 4e-05, "loss": 0.0, "step": 2 }, { "epoch": 0.007259528130671506, "grad_norm": NaN, "learning_rate": 6e-05, "loss": 0.0, "step": 3 }, { "epoch": 0.009679370840895343, "grad_norm": NaN, "learning_rate": 8e-05, "loss": 0.0, "step": 4 }, { "epoch": 0.012099213551119177, "grad_norm": NaN, "learning_rate": 0.0001, "loss": 0.0, "step": 5 }, { "epoch": 0.012099213551119177, "eval_loss": NaN, "eval_runtime": 19.3272, "eval_samples_per_second": 9.003, "eval_steps_per_second": 4.501, "step": 5 }, { "epoch": 0.014519056261343012, "grad_norm": NaN, "learning_rate": 0.00012, "loss": 0.0, "step": 6 }, { "epoch": 0.01693889897156685, "grad_norm": NaN, "learning_rate": 0.00014, "loss": 0.0, "step": 7 }, { "epoch": 0.019358741681790685, "grad_norm": NaN, "learning_rate": 0.00016, "loss": 0.0, "step": 8 }, { "epoch": 0.021778584392014518, "grad_norm": NaN, "learning_rate": 0.00018, "loss": 0.0, "step": 9 }, { "epoch": 0.024198427102238355, "grad_norm": NaN, "learning_rate": 0.0002, "loss": 0.0, "step": 10 }, { "epoch": 0.024198427102238355, "eval_loss": NaN, "eval_runtime": 19.3407, "eval_samples_per_second": 8.997, "eval_steps_per_second": 4.498, "step": 10 }, { "epoch": 0.02661826981246219, "grad_norm": NaN, "learning_rate": 0.00019510565162951537, "loss": 0.0, "step": 11 }, { "epoch": 0.029038112522686024, "grad_norm": NaN, "learning_rate": 0.00018090169943749476, "loss": 0.0, "step": 12 }, { "epoch": 0.03145795523290986, "grad_norm": NaN, "learning_rate": 0.00015877852522924732, "loss": 0.0, "step": 13 }, { "epoch": 0.0338777979431337, "grad_norm": NaN, "learning_rate": 0.00013090169943749476, "loss": 0.0, "step": 14 }, { "epoch": 0.036297640653357534, "grad_norm": NaN, "learning_rate": 0.0001, "loss": 0.0, "step": 15 }, { "epoch": 0.036297640653357534, "eval_loss": NaN, "eval_runtime": 19.3315, "eval_samples_per_second": 9.001, "eval_steps_per_second": 4.5, "step": 15 }, { "epoch": 0.03871748336358137, "grad_norm": NaN, "learning_rate": 6.909830056250527e-05, "loss": 0.0, "step": 16 }, { "epoch": 0.0411373260738052, "grad_norm": NaN, "learning_rate": 4.12214747707527e-05, "loss": 0.0, "step": 17 }, { "epoch": 0.043557168784029036, "grad_norm": NaN, "learning_rate": 1.9098300562505266e-05, "loss": 0.0, "step": 18 }, { "epoch": 0.04597701149425287, "grad_norm": NaN, "learning_rate": 4.8943483704846475e-06, "loss": 0.0, "step": 19 }, { "epoch": 0.04839685420447671, "grad_norm": NaN, "learning_rate": 0.0, "loss": 0.0, "step": 20 }, { "epoch": 0.04839685420447671, "eval_loss": NaN, "eval_runtime": 19.3249, "eval_samples_per_second": 9.004, "eval_steps_per_second": 4.502, "step": 20 } ], "logging_steps": 1, "max_steps": 20, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 5, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.483774567120896e+16, "train_batch_size": 2, 
"trial_name": null, "trial_params": null }