|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.16647398843930636,
  "eval_steps": 9,
  "global_step": 36,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.004624277456647399,
      "eval_loss": 4.693221569061279,
      "eval_runtime": 23.3821,
      "eval_samples_per_second": 15.61,
      "eval_steps_per_second": 1.967,
      "step": 1
    },
    {
      "epoch": 0.013872832369942197,
      "grad_norm": 6.138703346252441,
      "learning_rate": 1.5e-05,
      "loss": 18.4969,
      "step": 3
    },
    {
      "epoch": 0.027745664739884393,
      "grad_norm": 6.098344802856445,
      "learning_rate": 3e-05,
      "loss": 18.5023,
      "step": 6
    },
    {
      "epoch": 0.04161849710982659,
      "grad_norm": 8.08814525604248,
      "learning_rate": 4.5e-05,
      "loss": 18.2983,
      "step": 9
    },
    {
      "epoch": 0.04161849710982659,
      "eval_loss": 4.422519207000732,
      "eval_runtime": 23.2409,
      "eval_samples_per_second": 15.705,
      "eval_steps_per_second": 1.979,
      "step": 9
    },
    {
      "epoch": 0.055491329479768786,
      "grad_norm": 13.294960975646973,
      "learning_rate": 4.993910125649561e-05,
      "loss": 17.6258,
      "step": 12
    },
    {
      "epoch": 0.06936416184971098,
      "grad_norm": 12.818150520324707,
      "learning_rate": 4.962019382530521e-05,
      "loss": 13.5799,
      "step": 15
    },
    {
      "epoch": 0.08323699421965318,
      "grad_norm": 11.56785774230957,
      "learning_rate": 4.9031542398457974e-05,
      "loss": 11.4697,
      "step": 18
    },
    {
      "epoch": 0.08323699421965318,
      "eval_loss": 2.661552906036377,
      "eval_runtime": 23.3554,
      "eval_samples_per_second": 15.628,
      "eval_steps_per_second": 1.97,
      "step": 18
    },
    {
      "epoch": 0.09710982658959537,
      "grad_norm": 11.117833137512207,
      "learning_rate": 4.817959636416969e-05,
      "loss": 9.6316,
      "step": 21
    },
    {
      "epoch": 0.11098265895953757,
      "grad_norm": 14.03640079498291,
      "learning_rate": 4.707368982147318e-05,
      "loss": 10.2925,
      "step": 24
    },
    {
      "epoch": 0.12485549132947976,
      "grad_norm": 12.930510520935059,
      "learning_rate": 4.572593931387604e-05,
      "loss": 9.8234,
      "step": 27
    },
    {
      "epoch": 0.12485549132947976,
      "eval_loss": 2.4066836833953857,
      "eval_runtime": 23.3713,
      "eval_samples_per_second": 15.617,
      "eval_steps_per_second": 1.968,
      "step": 27
    },
    {
      "epoch": 0.13872832369942195,
      "grad_norm": 13.057132720947266,
      "learning_rate": 4.415111107797445e-05,
      "loss": 8.9095,
      "step": 30
    },
    {
      "epoch": 0.15260115606936417,
      "grad_norm": 13.501303672790527,
      "learning_rate": 4.2366459261474933e-05,
      "loss": 9.0641,
      "step": 33
    },
    {
      "epoch": 0.16647398843930636,
      "grad_norm": 13.990789413452148,
      "learning_rate": 4.039153688314145e-05,
      "loss": 8.5,
      "step": 36
    },
    {
      "epoch": 0.16647398843930636,
      "eval_loss": 2.281359910964966,
      "eval_runtime": 23.3762,
      "eval_samples_per_second": 15.614,
      "eval_steps_per_second": 1.968,
      "step": 36
    }
  ],
  "logging_steps": 3,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 9,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.713086754540749e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|