|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 76,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013157894736842105,
      "grad_norm": 1699.4203999374324,
      "learning_rate": 1.25e-06,
      "loss": 11.6905,
      "step": 1
    },
    {
      "epoch": 0.02631578947368421,
      "grad_norm": 1649.6649286698323,
      "learning_rate": 2.5e-06,
      "loss": 11.7067,
      "step": 2
    },
    {
      "epoch": 0.039473684210526314,
      "grad_norm": 1109.6668329617996,
      "learning_rate": 3.7500000000000005e-06,
      "loss": 10.8238,
      "step": 3
    },
    {
      "epoch": 0.05263157894736842,
      "grad_norm": 1106.178486585862,
      "learning_rate": 5e-06,
      "loss": 9.0694,
      "step": 4
    },
    {
      "epoch": 0.06578947368421052,
      "grad_norm": 809.3294910843833,
      "learning_rate": 6.25e-06,
      "loss": 5.9203,
      "step": 5
    },
    {
      "epoch": 0.07894736842105263,
      "grad_norm": 257.0422697393994,
      "learning_rate": 7.500000000000001e-06,
      "loss": 4.7981,
      "step": 6
    },
    {
      "epoch": 0.09210526315789473,
      "grad_norm": 394.7803420902946,
      "learning_rate": 8.750000000000001e-06,
      "loss": 5.5132,
      "step": 7
    },
    {
      "epoch": 0.10526315789473684,
      "grad_norm": 126.60762803235828,
      "learning_rate": 1e-05,
      "loss": 3.2079,
      "step": 8
    },
    {
      "epoch": 0.11842105263157894,
      "grad_norm": 125.30453375076969,
      "learning_rate": 9.994664874011864e-06,
      "loss": 5.0667,
      "step": 9
    },
    {
      "epoch": 0.13157894736842105,
      "grad_norm": 158.38384375765978,
      "learning_rate": 9.978670881475173e-06,
      "loss": 4.1611,
      "step": 10
    },
    {
      "epoch": 0.14473684210526316,
      "grad_norm": 55.659050974987274,
      "learning_rate": 9.952052154376027e-06,
      "loss": 3.4048,
      "step": 11
    },
    {
      "epoch": 0.15789473684210525,
      "grad_norm": 42.873598431489626,
      "learning_rate": 9.91486549841951e-06,
      "loss": 4.0195,
      "step": 12
    },
    {
      "epoch": 0.17105263157894737,
      "grad_norm": 72.14863137487674,
      "learning_rate": 9.867190271803466e-06,
      "loss": 4.0778,
      "step": 13
    },
    {
      "epoch": 0.18421052631578946,
      "grad_norm": 39.55596624092672,
      "learning_rate": 9.809128215864096e-06,
      "loss": 3.8024,
      "step": 14
    },
    {
      "epoch": 0.19736842105263158,
      "grad_norm": 18.04964705613809,
      "learning_rate": 9.74080323795483e-06,
      "loss": 3.3671,
      "step": 15
    },
    {
      "epoch": 0.21052631578947367,
      "grad_norm": 14.28948406963546,
      "learning_rate": 9.66236114702178e-06,
      "loss": 2.7161,
      "step": 16
    },
    {
      "epoch": 0.2236842105263158,
      "grad_norm": 14.98902092554103,
      "learning_rate": 9.573969342440107e-06,
      "loss": 2.6427,
      "step": 17
    },
    {
      "epoch": 0.23684210526315788,
      "grad_norm": 17.857073526805326,
      "learning_rate": 9.475816456775313e-06,
      "loss": 2.5664,
      "step": 18
    },
    {
      "epoch": 0.25,
      "grad_norm": 17.982906576778536,
      "learning_rate": 9.368111953231849e-06,
      "loss": 2.3566,
      "step": 19
    },
    {
      "epoch": 0.2631578947368421,
      "grad_norm": 14.066497831682323,
      "learning_rate": 9.251085678648072e-06,
      "loss": 2.3749,
      "step": 20
    },
    {
      "epoch": 0.27631578947368424,
      "grad_norm": 16.568001275060062,
      "learning_rate": 9.124987372991512e-06,
      "loss": 2.3824,
      "step": 21
    },
    {
      "epoch": 0.2894736842105263,
      "grad_norm": 10.225207891248253,
      "learning_rate": 8.990086136401199e-06,
      "loss": 2.2876,
      "step": 22
    },
    {
      "epoch": 0.3026315789473684,
      "grad_norm": 15.52475147772328,
      "learning_rate": 8.846669854914395e-06,
      "loss": 2.0579,
      "step": 23
    },
    {
      "epoch": 0.3157894736842105,
      "grad_norm": 10.735876859280078,
      "learning_rate": 8.695044586103297e-06,
      "loss": 2.1149,
      "step": 24
    },
    {
      "epoch": 0.32894736842105265,
      "grad_norm": 12.03589457222836,
      "learning_rate": 8.535533905932739e-06,
      "loss": 2.0015,
      "step": 25
    },
    {
      "epoch": 0.34210526315789475,
      "grad_norm": 10.668355704543856,
      "learning_rate": 8.368478218232787e-06,
      "loss": 2.0109,
      "step": 26
    },
    {
      "epoch": 0.35526315789473684,
      "grad_norm": 11.247848100294151,
      "learning_rate": 8.194234028259806e-06,
      "loss": 2.0085,
      "step": 27
    },
    {
      "epoch": 0.3684210526315789,
      "grad_norm": 10.041875020749039,
      "learning_rate": 8.013173181896283e-06,
      "loss": 1.707,
      "step": 28
    },
    {
      "epoch": 0.3815789473684211,
      "grad_norm": 16.66505157922524,
      "learning_rate": 7.82568207211296e-06,
      "loss": 1.9144,
      "step": 29
    },
    {
      "epoch": 0.39473684210526316,
      "grad_norm": 14.129248059213934,
      "learning_rate": 7.63216081438678e-06,
      "loss": 1.7373,
      "step": 30
    },
    {
      "epoch": 0.40789473684210525,
      "grad_norm": 8.400365715854278,
      "learning_rate": 7.4330223928342814e-06,
      "loss": 1.606,
      "step": 31
    },
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 9.454647164990588,
      "learning_rate": 7.2286917788826926e-06,
      "loss": 1.5251,
      "step": 32
    },
    {
      "epoch": 0.4342105263157895,
      "grad_norm": 11.708573459019293,
      "learning_rate": 7.019605024359475e-06,
      "loss": 1.4462,
      "step": 33
    },
    {
      "epoch": 0.4473684210526316,
      "grad_norm": 11.288211975525503,
      "learning_rate": 6.806208330935766e-06,
      "loss": 1.3242,
      "step": 34
    },
    {
      "epoch": 0.4605263157894737,
      "grad_norm": 12.84833845270528,
      "learning_rate": 6.588957097909509e-06,
      "loss": 1.2777,
      "step": 35
    },
    {
      "epoch": 0.47368421052631576,
      "grad_norm": 12.805717587983919,
      "learning_rate": 6.368314950360416e-06,
      "loss": 1.2439,
      "step": 36
    },
    {
      "epoch": 0.4868421052631579,
      "grad_norm": 16.79814160674569,
      "learning_rate": 6.144752749750671e-06,
      "loss": 1.2062,
      "step": 37
    },
    {
      "epoch": 0.5,
      "grad_norm": 11.704864343239466,
      "learning_rate": 5.918747589082853e-06,
      "loss": 1.2953,
      "step": 38
    },
    {
      "epoch": 0.5131578947368421,
      "grad_norm": 9.088068501685061,
      "learning_rate": 5.690781774759412e-06,
      "loss": 1.1413,
      "step": 39
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 9.452972340503731,
      "learning_rate": 5.46134179731651e-06,
      "loss": 1.1089,
      "step": 40
    },
    {
      "epoch": 0.5394736842105263,
      "grad_norm": 13.866484586191572,
      "learning_rate": 5.230917293228699e-06,
      "loss": 1.0386,
      "step": 41
    },
    {
      "epoch": 0.5526315789473685,
      "grad_norm": 9.961380544449332,
      "learning_rate": 5e-06,
      "loss": 1.0679,
      "step": 42
    },
    {
      "epoch": 0.5657894736842105,
      "grad_norm": 8.460862397122703,
      "learning_rate": 4.7690827067713035e-06,
      "loss": 1.1567,
      "step": 43
    },
    {
      "epoch": 0.5789473684210527,
      "grad_norm": 8.823619822316715,
      "learning_rate": 4.53865820268349e-06,
      "loss": 1.049,
      "step": 44
    },
    {
      "epoch": 0.5921052631578947,
      "grad_norm": 7.271462434248026,
      "learning_rate": 4.309218225240591e-06,
      "loss": 0.9448,
      "step": 45
    },
    {
      "epoch": 0.6052631578947368,
      "grad_norm": 15.732461239193407,
      "learning_rate": 4.081252410917148e-06,
      "loss": 0.9914,
      "step": 46
    },
    {
      "epoch": 0.618421052631579,
      "grad_norm": 11.654049140402229,
      "learning_rate": 3.855247250249331e-06,
      "loss": 0.8027,
      "step": 47
    },
    {
      "epoch": 0.631578947368421,
      "grad_norm": 17.59370496573567,
      "learning_rate": 3.6316850496395863e-06,
      "loss": 1.0106,
      "step": 48
    },
    {
      "epoch": 0.6447368421052632,
      "grad_norm": 10.035162483746694,
      "learning_rate": 3.4110429020904924e-06,
      "loss": 0.8962,
      "step": 49
    },
    {
      "epoch": 0.6578947368421053,
      "grad_norm": 7.517763142577863,
      "learning_rate": 3.1937916690642356e-06,
      "loss": 0.9492,
      "step": 50
    },
    {
      "epoch": 0.6710526315789473,
      "grad_norm": 15.969988377607551,
      "learning_rate": 2.980394975640526e-06,
      "loss": 0.8697,
      "step": 51
    },
    {
      "epoch": 0.6842105263157895,
      "grad_norm": 7.9971953326811125,
      "learning_rate": 2.771308221117309e-06,
      "loss": 0.9477,
      "step": 52
    },
    {
      "epoch": 0.6973684210526315,
      "grad_norm": 9.166442366719266,
      "learning_rate": 2.5669776071657194e-06,
      "loss": 0.9585,
      "step": 53
    },
    {
      "epoch": 0.7105263157894737,
      "grad_norm": 8.283342586733458,
      "learning_rate": 2.3678391856132203e-06,
      "loss": 0.948,
      "step": 54
    },
    {
      "epoch": 0.7236842105263158,
      "grad_norm": 9.135125773249728,
      "learning_rate": 2.174317927887041e-06,
      "loss": 0.8435,
      "step": 55
    },
    {
      "epoch": 0.7368421052631579,
      "grad_norm": 7.738718289232403,
      "learning_rate": 1.9868268181037186e-06,
      "loss": 0.9933,
      "step": 56
    },
    {
      "epoch": 0.75,
      "grad_norm": 5.632776555168388,
      "learning_rate": 1.8057659717401948e-06,
      "loss": 0.8011,
      "step": 57
    },
    {
      "epoch": 0.7631578947368421,
      "grad_norm": 6.642349176520887,
      "learning_rate": 1.6315217817672142e-06,
      "loss": 0.7914,
      "step": 58
    },
    {
      "epoch": 0.7763157894736842,
      "grad_norm": 6.204837918911468,
      "learning_rate": 1.4644660940672628e-06,
      "loss": 0.7612,
      "step": 59
    },
    {
      "epoch": 0.7894736842105263,
      "grad_norm": 6.881328834033968,
      "learning_rate": 1.3049554138967052e-06,
      "loss": 0.7503,
      "step": 60
    },
    {
      "epoch": 0.8026315789473685,
      "grad_norm": 6.131564640099481,
      "learning_rate": 1.1533301450856054e-06,
      "loss": 0.817,
      "step": 61
    },
    {
      "epoch": 0.8157894736842105,
      "grad_norm": 5.8740721419365585,
      "learning_rate": 1.0099138635988026e-06,
      "loss": 0.7754,
      "step": 62
    },
    {
      "epoch": 0.8289473684210527,
      "grad_norm": 5.434760658098561,
      "learning_rate": 8.750126270084891e-07,
      "loss": 0.8402,
      "step": 63
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 5.408440691228196,
      "learning_rate": 7.489143213519301e-07,
      "loss": 0.7445,
      "step": 64
    },
    {
      "epoch": 0.8552631578947368,
      "grad_norm": 4.506252846496264,
      "learning_rate": 6.318880467681527e-07,
      "loss": 0.6984,
      "step": 65
    },
    {
      "epoch": 0.868421052631579,
      "grad_norm": 7.264506467869279,
      "learning_rate": 5.241835432246888e-07,
      "loss": 0.7549,
      "step": 66
    },
    {
      "epoch": 0.881578947368421,
      "grad_norm": 5.671765225216593,
      "learning_rate": 4.2603065755989493e-07,
      "loss": 0.8249,
      "step": 67
    },
    {
      "epoch": 0.8947368421052632,
      "grad_norm": 4.946461980264793,
      "learning_rate": 3.3763885297822153e-07,
      "loss": 0.7372,
      "step": 68
    },
    {
      "epoch": 0.9078947368421053,
      "grad_norm": 7.216022490434719,
      "learning_rate": 2.5919676204517073e-07,
      "loss": 0.7816,
      "step": 69
    },
    {
      "epoch": 0.9210526315789473,
      "grad_norm": 4.627827587948695,
      "learning_rate": 1.908717841359048e-07,
      "loss": 0.786,
      "step": 70
    },
    {
      "epoch": 0.9342105263157895,
      "grad_norm": 3.6873839890732363,
      "learning_rate": 1.328097281965357e-07,
      "loss": 0.7109,
      "step": 71
    },
    {
      "epoch": 0.9473684210526315,
      "grad_norm": 3.9116965099593646,
      "learning_rate": 8.513450158049109e-08,
      "loss": 0.7468,
      "step": 72
    },
    {
      "epoch": 0.9605263157894737,
      "grad_norm": 4.8726672621846365,
      "learning_rate": 4.794784562397459e-08,
      "loss": 0.6888,
      "step": 73
    },
    {
      "epoch": 0.9736842105263158,
      "grad_norm": 4.616871357784515,
      "learning_rate": 2.1329118524827662e-08,
      "loss": 0.7657,
      "step": 74
    },
    {
      "epoch": 0.9868421052631579,
      "grad_norm": 4.101276371849697,
      "learning_rate": 5.3351259881379016e-09,
      "loss": 0.686,
      "step": 75
    },
    {
      "epoch": 1.0,
      "grad_norm": 4.36368396992744,
      "learning_rate": 0.0,
      "loss": 0.8604,
      "step": 76
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.7618905305862427,
      "eval_runtime": 129.5439,
      "eval_samples_per_second": 39.407,
      "eval_steps_per_second": 1.235,
      "step": 76
    },
    {
      "epoch": 1.0,
      "step": 76,
      "total_flos": 2.244031216091136e+16,
      "train_loss": 2.2036053005017733,
      "train_runtime": 949.5806,
      "train_samples_per_second": 10.213,
      "train_steps_per_second": 0.08
    }
  ],
  "logging_steps": 1,
  "max_steps": 76,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.244031216091136e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|