{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.4767580452920143,
  "eval_steps": 9,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.004767580452920143,
      "eval_loss": 3.438885450363159,
      "eval_runtime": 23.2999,
      "eval_samples_per_second": 15.193,
      "eval_steps_per_second": 1.931,
      "step": 1
    },
    {
      "epoch": 0.014302741358760428,
      "grad_norm": 1.0672996044158936,
      "learning_rate": 3e-05,
      "loss": 3.2121,
      "step": 3
    },
    {
      "epoch": 0.028605482717520857,
      "grad_norm": 0.7437488436698914,
      "learning_rate": 6e-05,
      "loss": 3.1757,
      "step": 6
    },
    {
      "epoch": 0.04290822407628129,
      "grad_norm": 1.009957194328308,
      "learning_rate": 9e-05,
      "loss": 3.1921,
      "step": 9
    },
    {
      "epoch": 0.04290822407628129,
      "eval_loss": 3.1295535564422607,
      "eval_runtime": 23.6302,
      "eval_samples_per_second": 14.981,
      "eval_steps_per_second": 1.904,
      "step": 9
    },
    {
      "epoch": 0.057210965435041714,
      "grad_norm": 0.9846349358558655,
      "learning_rate": 9.987820251299122e-05,
      "loss": 3.0455,
      "step": 12
    },
    {
      "epoch": 0.07151370679380215,
      "grad_norm": 1.2678148746490479,
      "learning_rate": 9.924038765061042e-05,
      "loss": 2.9,
      "step": 15
    },
    {
      "epoch": 0.08581644815256258,
      "grad_norm": 0.8464758396148682,
      "learning_rate": 9.806308479691595e-05,
      "loss": 2.647,
      "step": 18
    },
    {
      "epoch": 0.08581644815256258,
      "eval_loss": 2.543804168701172,
      "eval_runtime": 23.7134,
      "eval_samples_per_second": 14.928,
      "eval_steps_per_second": 1.898,
      "step": 18
    },
    {
      "epoch": 0.10011918951132301,
      "grad_norm": 0.8965421915054321,
      "learning_rate": 9.635919272833938e-05,
      "loss": 2.4721,
      "step": 21
    },
    {
      "epoch": 0.11442193087008343,
      "grad_norm": 0.8901471495628357,
      "learning_rate": 9.414737964294636e-05,
      "loss": 2.4806,
      "step": 24
    },
    {
      "epoch": 0.12872467222884387,
      "grad_norm": 0.6464989185333252,
      "learning_rate": 9.145187862775209e-05,
      "loss": 2.4984,
      "step": 27
    },
    {
      "epoch": 0.12872467222884387,
      "eval_loss": 2.3897674083709717,
      "eval_runtime": 23.7248,
      "eval_samples_per_second": 14.921,
      "eval_steps_per_second": 1.897,
      "step": 27
    },
    {
      "epoch": 0.1430274135876043,
      "grad_norm": 0.5345009565353394,
      "learning_rate": 8.83022221559489e-05,
      "loss": 2.2879,
      "step": 30
    },
    {
      "epoch": 0.1573301549463647,
      "grad_norm": 1.6660727262496948,
      "learning_rate": 8.473291852294987e-05,
      "loss": 2.1803,
      "step": 33
    },
    {
      "epoch": 0.17163289630512515,
      "grad_norm": 0.7011714577674866,
      "learning_rate": 8.07830737662829e-05,
      "loss": 2.3269,
      "step": 36
    },
    {
      "epoch": 0.17163289630512515,
      "eval_loss": 2.3226821422576904,
      "eval_runtime": 23.7388,
      "eval_samples_per_second": 14.912,
      "eval_steps_per_second": 1.896,
      "step": 36
    },
    {
      "epoch": 0.18593563766388557,
      "grad_norm": 0.861555814743042,
      "learning_rate": 7.649596321166024e-05,
      "loss": 2.3854,
      "step": 39
    },
    {
      "epoch": 0.20023837902264602,
      "grad_norm": 0.7908144593238831,
      "learning_rate": 7.191855733945387e-05,
      "loss": 2.4378,
      "step": 42
    },
    {
      "epoch": 0.21454112038140644,
      "grad_norm": 0.7242131233215332,
      "learning_rate": 6.710100716628344e-05,
      "loss": 2.4475,
      "step": 45
    },
    {
      "epoch": 0.21454112038140644,
      "eval_loss": 2.2817511558532715,
      "eval_runtime": 23.748,
      "eval_samples_per_second": 14.906,
      "eval_steps_per_second": 1.895,
      "step": 45
    },
    {
      "epoch": 0.22884386174016685,
      "grad_norm": 0.6113729476928711,
      "learning_rate": 6.209609477998338e-05,
      "loss": 2.2601,
      "step": 48
    },
    {
      "epoch": 0.2431466030989273,
      "grad_norm": 0.6754246950149536,
      "learning_rate": 5.695865504800327e-05,
      "loss": 2.1681,
      "step": 51
    },
    {
      "epoch": 0.25744934445768775,
      "grad_norm": 0.6604123115539551,
      "learning_rate": 5.174497483512506e-05,
      "loss": 2.2141,
      "step": 54
    },
    {
      "epoch": 0.25744934445768775,
      "eval_loss": 2.253026247024536,
      "eval_runtime": 23.7486,
      "eval_samples_per_second": 14.906,
      "eval_steps_per_second": 1.895,
      "step": 54
    },
    {
      "epoch": 0.27175208581644816,
      "grad_norm": 0.7334719300270081,
      "learning_rate": 4.6512176312793736e-05,
      "loss": 2.2945,
      "step": 57
    },
    {
      "epoch": 0.2860548271752086,
      "grad_norm": 0.6848142147064209,
      "learning_rate": 4.131759111665349e-05,
      "loss": 2.3294,
      "step": 60
    },
    {
      "epoch": 0.300357568533969,
      "grad_norm": 0.6209741234779358,
      "learning_rate": 3.6218132209150045e-05,
      "loss": 2.09,
      "step": 63
    },
    {
      "epoch": 0.300357568533969,
      "eval_loss": 2.230947732925415,
      "eval_runtime": 23.7116,
      "eval_samples_per_second": 14.929,
      "eval_steps_per_second": 1.898,
      "step": 63
    },
    {
      "epoch": 0.3146603098927294,
      "grad_norm": 0.699060320854187,
      "learning_rate": 3.12696703292044e-05,
      "loss": 2.189,
      "step": 66
    },
    {
      "epoch": 0.3289630512514899,
      "grad_norm": 0.6675814390182495,
      "learning_rate": 2.6526421860705473e-05,
      "loss": 2.2713,
      "step": 69
    },
    {
      "epoch": 0.3432657926102503,
      "grad_norm": 0.6715078353881836,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 2.1175,
      "step": 72
    },
    {
      "epoch": 0.3432657926102503,
      "eval_loss": 2.2183046340942383,
      "eval_runtime": 23.7149,
      "eval_samples_per_second": 14.927,
      "eval_steps_per_second": 1.898,
      "step": 72
    },
    {
      "epoch": 0.3575685339690107,
      "grad_norm": 0.7805127501487732,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 2.1241,
      "step": 75
    },
    {
      "epoch": 0.37187127532777114,
      "grad_norm": 0.7199813723564148,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 2.1669,
      "step": 78
    },
    {
      "epoch": 0.38617401668653156,
      "grad_norm": 0.8319717049598694,
      "learning_rate": 1.0599462319663905e-05,
      "loss": 2.1235,
      "step": 81
    },
    {
      "epoch": 0.38617401668653156,
      "eval_loss": 2.2104547023773193,
      "eval_runtime": 23.7384,
      "eval_samples_per_second": 14.913,
      "eval_steps_per_second": 1.896,
      "step": 81
    },
    {
      "epoch": 0.40047675804529204,
      "grad_norm": 0.7010225057601929,
      "learning_rate": 7.597595192178702e-06,
      "loss": 2.3046,
      "step": 84
    },
    {
      "epoch": 0.41477949940405245,
      "grad_norm": 0.5844500064849854,
      "learning_rate": 5.060297685041659e-06,
      "loss": 2.1629,
      "step": 87
    },
    {
      "epoch": 0.42908224076281287,
      "grad_norm": 0.7447096109390259,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 2.0827,
      "step": 90
    },
    {
      "epoch": 0.42908224076281287,
      "eval_loss": 2.2069106101989746,
      "eval_runtime": 23.7385,
      "eval_samples_per_second": 14.912,
      "eval_steps_per_second": 1.896,
      "step": 90
    },
    {
      "epoch": 0.4433849821215733,
      "grad_norm": 0.8038536906242371,
      "learning_rate": 1.4852136862001764e-06,
      "loss": 2.2257,
      "step": 93
    },
    {
      "epoch": 0.4576877234803337,
      "grad_norm": 0.6432086229324341,
      "learning_rate": 4.865965629214819e-07,
      "loss": 2.1321,
      "step": 96
    },
    {
      "epoch": 0.4719904648390942,
      "grad_norm": 0.6721349358558655,
      "learning_rate": 3.04586490452119e-08,
      "loss": 2.1555,
      "step": 99
    },
    {
      "epoch": 0.4719904648390942,
      "eval_loss": 2.206080198287964,
      "eval_runtime": 23.7434,
      "eval_samples_per_second": 14.909,
      "eval_steps_per_second": 1.895,
      "step": 99
    }
  ],
  "logging_steps": 3,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 9,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3843843396927488e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}