{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.02328830926874709,
  "eval_steps": 10,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0004657661853749418,
      "eval_loss": 4.151027679443359,
      "eval_runtime": 56.7914,
      "eval_samples_per_second": 15.936,
      "eval_steps_per_second": 7.977,
      "step": 1
    },
    {
      "epoch": 0.002328830926874709,
      "grad_norm": 25.35050392150879,
      "learning_rate": 5e-05,
      "loss": 15.257,
      "step": 5
    },
    {
      "epoch": 0.004657661853749418,
      "grad_norm": 46.72882843017578,
      "learning_rate": 0.0001,
      "loss": 15.4873,
      "step": 10
    },
    {
      "epoch": 0.004657661853749418,
      "eval_loss": 3.0870630741119385,
      "eval_runtime": 57.6938,
      "eval_samples_per_second": 15.686,
      "eval_steps_per_second": 7.852,
      "step": 10
    },
    {
      "epoch": 0.0069864927806241265,
      "grad_norm": 36.1671028137207,
      "learning_rate": 9.619397662556435e-05,
      "loss": 12.1358,
      "step": 15
    },
    {
      "epoch": 0.009315323707498836,
      "grad_norm": 34.362125396728516,
      "learning_rate": 8.535533905932738e-05,
      "loss": 11.3998,
      "step": 20
    },
    {
      "epoch": 0.009315323707498836,
      "eval_loss": 2.732847213745117,
      "eval_runtime": 57.8298,
      "eval_samples_per_second": 15.649,
      "eval_steps_per_second": 7.833,
      "step": 20
    },
    {
      "epoch": 0.011644154634373545,
      "grad_norm": 24.70348358154297,
      "learning_rate": 6.91341716182545e-05,
      "loss": 10.9537,
      "step": 25
    },
    {
      "epoch": 0.013972985561248253,
      "grad_norm": 39.36613464355469,
      "learning_rate": 5e-05,
      "loss": 11.1584,
      "step": 30
    },
    {
      "epoch": 0.013972985561248253,
      "eval_loss": 2.6284070014953613,
      "eval_runtime": 57.9092,
      "eval_samples_per_second": 15.628,
      "eval_steps_per_second": 7.823,
      "step": 30
    },
    {
      "epoch": 0.016301816488122962,
      "grad_norm": 40.228370666503906,
      "learning_rate": 3.086582838174551e-05,
      "loss": 9.9753,
      "step": 35
    },
    {
      "epoch": 0.018630647414997672,
      "grad_norm": 36.44306182861328,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 10.6079,
      "step": 40
    },
    {
      "epoch": 0.018630647414997672,
      "eval_loss": 2.5764482021331787,
      "eval_runtime": 57.7996,
      "eval_samples_per_second": 15.658,
      "eval_steps_per_second": 7.837,
      "step": 40
    },
    {
      "epoch": 0.02095947834187238,
      "grad_norm": 36.49095153808594,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 10.6934,
      "step": 45
    },
    {
      "epoch": 0.02328830926874709,
      "grad_norm": 39.59294128417969,
      "learning_rate": 0.0,
      "loss": 9.9457,
      "step": 50
    },
    {
      "epoch": 0.02328830926874709,
      "eval_loss": 2.5675735473632812,
      "eval_runtime": 57.7456,
      "eval_samples_per_second": 15.672,
      "eval_steps_per_second": 7.845,
      "step": 50
    }
  ],
  "logging_steps": 5,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 13,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8763348929740800.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}