{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0217391304347827,
  "eval_steps": 4,
  "global_step": 13,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07729468599033816,
      "grad_norm": 29.966806411743164,
      "learning_rate": 5e-05,
      "loss": 354.7632,
      "step": 1
    },
    {
      "epoch": 0.07729468599033816,
      "eval_loss": 11.087127685546875,
      "eval_runtime": 0.8448,
      "eval_samples_per_second": 104.163,
      "eval_steps_per_second": 26.041,
      "step": 1
    },
    {
      "epoch": 0.15458937198067632,
      "grad_norm": 31.932706832885742,
      "learning_rate": 0.0001,
      "loss": 354.9215,
      "step": 2
    },
    {
      "epoch": 0.2318840579710145,
      "grad_norm": 30.634830474853516,
      "learning_rate": 9.797464868072488e-05,
      "loss": 354.4944,
      "step": 3
    },
    {
      "epoch": 0.30917874396135264,
      "grad_norm": 31.755876541137695,
      "learning_rate": 9.206267664155907e-05,
      "loss": 354.2277,
      "step": 4
    },
    {
      "epoch": 0.30917874396135264,
      "eval_loss": 11.05913257598877,
      "eval_runtime": 0.3543,
      "eval_samples_per_second": 248.368,
      "eval_steps_per_second": 62.092,
      "step": 4
    },
    {
      "epoch": 0.3864734299516908,
      "grad_norm": 31.669504165649414,
      "learning_rate": 8.274303669726426e-05,
      "loss": 353.9269,
      "step": 5
    },
    {
      "epoch": 0.463768115942029,
      "grad_norm": 32.336524963378906,
      "learning_rate": 7.077075065009433e-05,
      "loss": 353.4835,
      "step": 6
    },
    {
      "epoch": 0.5410628019323671,
      "grad_norm": 31.395036697387695,
      "learning_rate": 5.7115741913664264e-05,
      "loss": 353.1369,
      "step": 7
    },
    {
      "epoch": 0.6183574879227053,
      "grad_norm": 31.216983795166016,
      "learning_rate": 4.288425808633575e-05,
      "loss": 352.7697,
      "step": 8
    },
    {
      "epoch": 0.6183574879227053,
      "eval_loss": 11.021666526794434,
      "eval_runtime": 0.3549,
      "eval_samples_per_second": 247.956,
      "eval_steps_per_second": 61.989,
      "step": 8
    },
    {
      "epoch": 0.6956521739130435,
      "grad_norm": 32.23792266845703,
      "learning_rate": 2.9229249349905684e-05,
      "loss": 352.3179,
      "step": 9
    },
    {
      "epoch": 0.7729468599033816,
      "grad_norm": 32.02090072631836,
      "learning_rate": 1.725696330273575e-05,
      "loss": 352.533,
      "step": 10
    },
    {
      "epoch": 0.8502415458937198,
      "grad_norm": 31.508159637451172,
      "learning_rate": 7.937323358440935e-06,
      "loss": 352.3194,
      "step": 11
    },
    {
      "epoch": 0.927536231884058,
      "grad_norm": 32.63449478149414,
      "learning_rate": 2.0253513192751373e-06,
      "loss": 352.3684,
      "step": 12
    },
    {
      "epoch": 0.927536231884058,
      "eval_loss": 11.013041496276855,
      "eval_runtime": 0.3545,
      "eval_samples_per_second": 248.25,
      "eval_steps_per_second": 62.062,
      "step": 12
    },
    {
      "epoch": 1.0217391304347827,
      "grad_norm": 32.89696502685547,
      "learning_rate": 0.0,
      "loss": 352.2435,
      "step": 13
    }
  ],
  "logging_steps": 1,
  "max_steps": 13,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 4,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 17305228541952.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}