mistral-7b-instruct-v0.3-gsm8k_italian-7d2e682a-a612-417c-a0cb-d49415363f2e/last-checkpoint/trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.0490075961774075,
  "eval_steps": 10,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00098015192354815,
      "eval_loss": 0.7627389430999756,
      "eval_runtime": 26.8533,
      "eval_samples_per_second": 16.013,
      "eval_steps_per_second": 8.006,
      "step": 1
    },
    {
      "epoch": 0.00490075961774075,
      "grad_norm": 14.23064136505127,
      "learning_rate": 5e-05,
      "loss": 3.1124,
      "step": 5
    },
    {
      "epoch": 0.0098015192354815,
      "grad_norm": 9.638243675231934,
      "learning_rate": 0.0001,
      "loss": 2.274,
      "step": 10
    },
    {
      "epoch": 0.0098015192354815,
      "eval_loss": 0.39032769203186035,
      "eval_runtime": 27.1203,
      "eval_samples_per_second": 15.855,
      "eval_steps_per_second": 7.928,
      "step": 10
    },
    {
      "epoch": 0.014702278853222249,
      "grad_norm": 15.856855392456055,
      "learning_rate": 9.619397662556435e-05,
      "loss": 1.2127,
      "step": 15
    },
    {
      "epoch": 0.019603038470963,
      "grad_norm": 8.120400428771973,
      "learning_rate": 8.535533905932738e-05,
      "loss": 1.085,
      "step": 20
    },
    {
      "epoch": 0.019603038470963,
      "eval_loss": 0.26506921648979187,
      "eval_runtime": 27.2179,
      "eval_samples_per_second": 15.798,
      "eval_steps_per_second": 7.899,
      "step": 20
    },
    {
      "epoch": 0.02450379808870375,
      "grad_norm": 9.337193489074707,
      "learning_rate": 6.91341716182545e-05,
      "loss": 1.1599,
      "step": 25
    },
    {
      "epoch": 0.029404557706444498,
      "grad_norm": 6.683337688446045,
      "learning_rate": 5e-05,
      "loss": 1.0121,
      "step": 30
    },
    {
      "epoch": 0.029404557706444498,
      "eval_loss": 0.24212950468063354,
      "eval_runtime": 27.4102,
      "eval_samples_per_second": 15.688,
      "eval_steps_per_second": 7.844,
      "step": 30
    },
    {
      "epoch": 0.03430531732418525,
      "grad_norm": 7.745213985443115,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.9605,
      "step": 35
    },
    {
      "epoch": 0.039206076941926,
      "grad_norm": 10.360566139221191,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.7756,
      "step": 40
    },
    {
      "epoch": 0.039206076941926,
      "eval_loss": 0.23207147419452667,
      "eval_runtime": 27.4395,
      "eval_samples_per_second": 15.671,
      "eval_steps_per_second": 7.835,
      "step": 40
    },
    {
      "epoch": 0.04410683655966675,
      "grad_norm": 6.0309343338012695,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.7961,
      "step": 45
    },
    {
      "epoch": 0.0490075961774075,
      "grad_norm": 5.921755313873291,
      "learning_rate": 0.0,
      "loss": 0.835,
      "step": 50
    },
    {
      "epoch": 0.0490075961774075,
      "eval_loss": 0.2300577163696289,
      "eval_runtime": 27.2186,
      "eval_samples_per_second": 15.798,
      "eval_steps_per_second": 7.899,
      "step": 50
    }
  ],
  "logging_steps": 5,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 13,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8767214400307200.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
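
A minimal sketch of how this state file can be inspected programmatically, assuming it sits at `last-checkpoint/trainer_state.json` as the path above suggests. The learning-rate reconstruction at the end is an assumption inferred from the logged `learning_rate` values (linear warmup over the first 10 steps to a peak of 1e-4, then cosine decay to zero at `max_steps` 50); it is not read from the saved training arguments, so treat it as illustrative rather than authoritative.

```python
import json
import math

# Path taken from the header above; adjust if the checkpoint lives elsewhere.
STATE_PATH = "last-checkpoint/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# log_history mixes training logs (with "loss") and evaluation logs (with "eval_loss").
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global_step={state['global_step']}  epoch={state['epoch']:.4f}")
for e in train_logs:
    print(f"step {e['step']:>3}: loss={e['loss']:.4f}  "
          f"lr={e['learning_rate']:.3e}  grad_norm={e['grad_norm']:.2f}")
for e in eval_logs:
    print(f"step {e['step']:>3}: eval_loss={e['eval_loss']:.4f}")

def assumed_lr(step, peak=1e-4, warmup=10, total=50):
    """Hypothetical schedule: linear warmup to `peak`, then cosine decay to 0."""
    if step <= warmup:
        return peak * step / warmup
    progress = (step - warmup) / (total - warmup)
    return 0.5 * peak * (1.0 + math.cos(math.pi * progress))

# Compare the reconstruction against the logged values.
for e in train_logs:
    print(f"step {e['step']:>3}: logged={e['learning_rate']:.6e}  "
          f"reconstructed={assumed_lr(e['step']):.6e}")
```

Read off the log itself, training loss falls from 3.11 at step 5 to roughly 0.8 by step 50, and eval_loss drops from 0.763 at step 1 to 0.230 at step 50, with most of the improvement in the first 30 steps.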