{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 30,
  "global_step": 450,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022222222222222223,
      "grad_norm": 0.8203227519989014,
      "learning_rate": 9.987820251299122e-05,
      "loss": 0.4543,
      "step": 10
    },
    {
      "epoch": 0.044444444444444446,
      "grad_norm": 0.09747839719057083,
      "learning_rate": 9.951340343707852e-05,
      "loss": 0.0143,
      "step": 20
    },
    {
      "epoch": 0.06666666666666667,
      "grad_norm": 0.008741447702050209,
      "learning_rate": 9.890738003669029e-05,
      "loss": 0.0042,
      "step": 30
    },
    {
      "epoch": 0.06666666666666667,
      "eval_loss": 0.004266222473233938,
      "eval_runtime": 117.2469,
      "eval_samples_per_second": 17.058,
      "eval_steps_per_second": 0.426,
      "step": 30
    },
    {
      "epoch": 0.08888888888888889,
      "grad_norm": 0.014671124517917633,
      "learning_rate": 9.806308479691595e-05,
      "loss": 0.0042,
      "step": 40
    },
    {
      "epoch": 0.1111111111111111,
      "grad_norm": 0.008508998900651932,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.004,
      "step": 50
    },
    {
      "epoch": 0.13333333333333333,
      "grad_norm": 0.009779859334230423,
      "learning_rate": 9.567727288213005e-05,
      "loss": 0.0039,
      "step": 60
    },
    {
      "epoch": 0.13333333333333333,
      "eval_loss": 0.0038896214682608843,
      "eval_runtime": 117.3001,
      "eval_samples_per_second": 17.05,
      "eval_steps_per_second": 0.426,
      "step": 60
    },
    {
      "epoch": 0.15555555555555556,
      "grad_norm": 0.007619491778314114,
      "learning_rate": 9.414737964294636e-05,
      "loss": 0.0039,
      "step": 70
    },
    {
      "epoch": 0.17777777777777778,
      "grad_norm": 0.006427408196032047,
      "learning_rate": 9.24024048078213e-05,
      "loss": 0.0039,
      "step": 80
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.006157474592328072,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.0039,
      "step": 90
    },
    {
      "epoch": 0.2,
      "eval_loss": 0.0038220968563109636,
      "eval_runtime": 117.37,
      "eval_samples_per_second": 17.04,
      "eval_steps_per_second": 0.426,
      "step": 90
    },
    {
      "epoch": 0.2222222222222222,
      "grad_norm": 0.03153691068291664,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.0039,
      "step": 100
    },
    {
      "epoch": 0.24444444444444444,
      "grad_norm": 0.0021719736978411674,
      "learning_rate": 8.596699001693255e-05,
      "loss": 0.0038,
      "step": 110
    },
    {
      "epoch": 0.26666666666666666,
      "grad_norm": 0.013368754647672176,
      "learning_rate": 8.345653031794292e-05,
      "loss": 0.0038,
      "step": 120
    },
    {
      "epoch": 0.26666666666666666,
      "eval_loss": 0.0037406720221042633,
      "eval_runtime": 199.847,
      "eval_samples_per_second": 10.008,
      "eval_steps_per_second": 0.25,
      "step": 120
    },
    {
      "epoch": 0.28888888888888886,
      "grad_norm": 0.014529922977089882,
      "learning_rate": 8.07830737662829e-05,
      "loss": 0.0037,
      "step": 130
    },
    {
      "epoch": 0.3111111111111111,
      "grad_norm": 0.0757686048746109,
      "learning_rate": 7.795964517353735e-05,
      "loss": 0.0037,
      "step": 140
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 0.01011459156870842,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.0038,
      "step": 150
    },
    {
      "epoch": 0.3333333333333333,
      "eval_loss": 0.0037290642503648996,
      "eval_runtime": 117.2836,
      "eval_samples_per_second": 17.053,
      "eval_steps_per_second": 0.426,
      "step": 150
    },
    {
      "epoch": 0.35555555555555557,
      "grad_norm": 0.007393770385533571,
      "learning_rate": 7.191855733945387e-05,
      "loss": 0.0038,
      "step": 160
    },
    {
      "epoch": 0.37777777777777777,
      "grad_norm": 0.007255127187818289,
      "learning_rate": 6.873032967079561e-05,
      "loss": 0.0037,
      "step": 170
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.029892487451434135,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.0038,
      "step": 180
    },
    {
      "epoch": 0.4,
      "eval_loss": 0.0037513442803174257,
      "eval_runtime": 117.2984,
      "eval_samples_per_second": 17.051,
      "eval_steps_per_second": 0.426,
      "step": 180
    },
    {
      "epoch": 0.4222222222222222,
      "grad_norm": 0.013895494863390923,
      "learning_rate": 6.209609477998338e-05,
      "loss": 0.0037,
      "step": 190
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 0.009924459271132946,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.0036,
      "step": 200
    },
    {
      "epoch": 0.4666666666666667,
      "grad_norm": 0.01607881858944893,
      "learning_rate": 5.522642316338268e-05,
      "loss": 0.0037,
      "step": 210
    },
    {
      "epoch": 0.4666666666666667,
      "eval_loss": 0.003465294372290373,
      "eval_runtime": 142.6159,
      "eval_samples_per_second": 14.024,
      "eval_steps_per_second": 0.351,
      "step": 210
    },
    {
      "epoch": 0.4888888888888889,
      "grad_norm": 0.05860644206404686,
      "learning_rate": 5.174497483512506e-05,
      "loss": 0.0035,
      "step": 220
    },
    {
      "epoch": 0.5111111111111111,
      "grad_norm": 0.07811647653579712,
      "learning_rate": 4.825502516487497e-05,
      "loss": 0.0036,
      "step": 230
    },
    {
      "epoch": 0.5333333333333333,
      "grad_norm": 0.019251324236392975,
      "learning_rate": 4.477357683661734e-05,
      "loss": 0.0032,
      "step": 240
    },
    {
      "epoch": 0.5333333333333333,
      "eval_loss": 0.0030640396289527416,
      "eval_runtime": 189.2243,
      "eval_samples_per_second": 10.569,
      "eval_steps_per_second": 0.264,
      "step": 240
    },
    {
      "epoch": 0.5555555555555556,
      "grad_norm": 0.019416367635130882,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.003,
      "step": 250
    },
    {
      "epoch": 0.5777777777777777,
      "grad_norm": 0.028752854093909264,
      "learning_rate": 3.790390522001662e-05,
      "loss": 0.0027,
      "step": 260
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.014850922860205173,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 0.0028,
      "step": 270
    },
    {
      "epoch": 0.6,
      "eval_loss": 0.002417274285107851,
      "eval_runtime": 117.247,
      "eval_samples_per_second": 17.058,
      "eval_steps_per_second": 0.426,
      "step": 270
    },
    {
      "epoch": 0.6222222222222222,
      "grad_norm": 0.01956513710319996,
      "learning_rate": 3.12696703292044e-05,
      "loss": 0.0026,
      "step": 280
    },
    {
      "epoch": 0.6444444444444445,
      "grad_norm": 0.010545406490564346,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 0.0026,
      "step": 290
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.05216179043054581,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.0025,
      "step": 300
    },
    {
      "epoch": 0.6666666666666666,
      "eval_loss": 0.002407504478469491,
      "eval_runtime": 117.3038,
      "eval_samples_per_second": 17.05,
      "eval_steps_per_second": 0.426,
      "step": 300
    },
    {
      "epoch": 0.6888888888888889,
      "grad_norm": 0.017784912139177322,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 0.0027,
      "step": 310
    },
    {
      "epoch": 0.7111111111111111,
      "grad_norm": 0.02129705250263214,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 0.0025,
      "step": 320
    },
    {
      "epoch": 0.7333333333333333,
      "grad_norm": 0.011542588472366333,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 0.0025,
      "step": 330
    },
    {
      "epoch": 0.7333333333333333,
      "eval_loss": 0.0024114707484841347,
      "eval_runtime": 211.1966,
      "eval_samples_per_second": 9.47,
      "eval_steps_per_second": 0.237,
      "step": 330
    },
    {
      "epoch": 0.7555555555555555,
      "grad_norm": 0.027737773954868317,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 0.0024,
      "step": 340
    },
    {
      "epoch": 0.7777777777777778,
      "grad_norm": 0.02074122428894043,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 0.0025,
      "step": 350
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.01723620295524597,
      "learning_rate": 9.549150281252633e-06,
      "loss": 0.0022,
      "step": 360
    },
    {
      "epoch": 0.8,
      "eval_loss": 0.0022669117897748947,
      "eval_runtime": 117.3791,
      "eval_samples_per_second": 17.039,
      "eval_steps_per_second": 0.426,
      "step": 360
    },
    {
      "epoch": 0.8222222222222222,
      "grad_norm": 0.024718888103961945,
      "learning_rate": 7.597595192178702e-06,
      "loss": 0.0023,
      "step": 370
    },
    {
      "epoch": 0.8444444444444444,
      "grad_norm": 0.010936993174254894,
      "learning_rate": 5.852620357053651e-06,
      "loss": 0.0023,
      "step": 380
    },
    {
      "epoch": 0.8666666666666667,
      "grad_norm": 0.0511295422911644,
      "learning_rate": 4.322727117869951e-06,
      "loss": 0.0023,
      "step": 390
    },
    {
      "epoch": 0.8666666666666667,
      "eval_loss": 0.002225763164460659,
      "eval_runtime": 117.4249,
      "eval_samples_per_second": 17.032,
      "eval_steps_per_second": 0.426,
      "step": 390
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.032784491777420044,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.0021,
      "step": 400
    },
    {
      "epoch": 0.9111111111111111,
      "grad_norm": 0.045701805502176285,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 0.0022,
      "step": 410
    },
    {
      "epoch": 0.9333333333333333,
      "grad_norm": 0.0103871775791049,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 0.0022,
      "step": 420
    },
    {
      "epoch": 0.9333333333333333,
      "eval_loss": 0.0022246255539357662,
      "eval_runtime": 117.3984,
      "eval_samples_per_second": 17.036,
      "eval_steps_per_second": 0.426,
      "step": 420
    },
    {
      "epoch": 0.9555555555555556,
      "grad_norm": 0.03928959742188454,
      "learning_rate": 4.865965629214819e-07,
      "loss": 0.0024,
      "step": 430
    },
    {
      "epoch": 0.9777777777777777,
      "grad_norm": 0.03138284012675285,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 0.0024,
      "step": 440
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.019241375848650932,
      "learning_rate": 0.0,
      "loss": 0.0021,
      "step": 450
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.0022388531360775232,
      "eval_runtime": 117.3729,
      "eval_samples_per_second": 17.04,
      "eval_steps_per_second": 0.426,
      "step": 450
    },
    {
      "epoch": 1.0,
      "step": 450,
      "total_flos": 1.663912988085584e+18,
      "train_loss": 0.013403736940688558,
      "train_runtime": 5942.7183,
      "train_samples_per_second": 3.029,
      "train_steps_per_second": 0.076
    }
  ],
  "logging_steps": 10,
  "max_steps": 450,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.663912988085584e+18,
  "train_batch_size": 10,
  "trial_name": null,
  "trial_params": null
}
|