{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.1272264631043257,
  "eval_steps": 13,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002544529262086514,
      "grad_norm": 3.013115882873535,
      "learning_rate": 5e-06,
      "loss": 1.1819,
      "step": 1
    },
    {
      "epoch": 0.002544529262086514,
      "eval_loss": 1.6131161451339722,
      "eval_runtime": 4.1535,
      "eval_samples_per_second": 39.966,
      "eval_steps_per_second": 19.983,
      "step": 1
    },
    {
      "epoch": 0.005089058524173028,
      "grad_norm": 3.4997785091400146,
      "learning_rate": 1e-05,
      "loss": 1.447,
      "step": 2
    },
    {
      "epoch": 0.007633587786259542,
      "grad_norm": 2.859135627746582,
      "learning_rate": 1.5e-05,
      "loss": 1.1831,
      "step": 3
    },
    {
      "epoch": 0.010178117048346057,
      "grad_norm": 2.542371988296509,
      "learning_rate": 2e-05,
      "loss": 0.8844,
      "step": 4
    },
    {
      "epoch": 0.01272264631043257,
      "grad_norm": 2.9563982486724854,
      "learning_rate": 2.5e-05,
      "loss": 1.2164,
      "step": 5
    },
    {
      "epoch": 0.015267175572519083,
      "grad_norm": 3.415498733520508,
      "learning_rate": 3e-05,
      "loss": 1.3261,
      "step": 6
    },
    {
      "epoch": 0.017811704834605598,
      "grad_norm": 3.1173617839813232,
      "learning_rate": 3.5e-05,
      "loss": 1.2456,
      "step": 7
    },
    {
      "epoch": 0.020356234096692113,
      "grad_norm": 2.952441453933716,
      "learning_rate": 4e-05,
      "loss": 1.0949,
      "step": 8
    },
    {
      "epoch": 0.022900763358778626,
      "grad_norm": 3.2792415618896484,
      "learning_rate": 4.5e-05,
      "loss": 1.1191,
      "step": 9
    },
    {
      "epoch": 0.02544529262086514,
      "grad_norm": 2.428950786590576,
      "learning_rate": 5e-05,
      "loss": 0.9415,
      "step": 10
    },
    {
      "epoch": 0.027989821882951654,
      "grad_norm": 3.6672072410583496,
      "learning_rate": 4.99229333433282e-05,
      "loss": 1.1482,
      "step": 11
    },
    {
      "epoch": 0.030534351145038167,
      "grad_norm": 2.3909623622894287,
      "learning_rate": 4.9692208514878444e-05,
      "loss": 0.7157,
      "step": 12
    },
    {
      "epoch": 0.03307888040712468,
      "grad_norm": 2.3522462844848633,
      "learning_rate": 4.9309248009941914e-05,
      "loss": 0.5238,
      "step": 13
    },
    {
      "epoch": 0.03307888040712468,
      "eval_loss": 0.7261966466903687,
      "eval_runtime": 4.1767,
      "eval_samples_per_second": 39.744,
      "eval_steps_per_second": 19.872,
      "step": 13
    },
    {
      "epoch": 0.035623409669211195,
      "grad_norm": 3.122359275817871,
      "learning_rate": 4.877641290737884e-05,
      "loss": 0.7318,
      "step": 14
    },
    {
      "epoch": 0.03816793893129771,
      "grad_norm": 5.089092254638672,
      "learning_rate": 4.8096988312782174e-05,
      "loss": 0.8987,
      "step": 15
    },
    {
      "epoch": 0.04071246819338423,
      "grad_norm": 2.803917407989502,
      "learning_rate": 4.72751631047092e-05,
      "loss": 0.5268,
      "step": 16
    },
    {
      "epoch": 0.043256997455470736,
      "grad_norm": 2.718916654586792,
      "learning_rate": 4.6316004108852305e-05,
      "loss": 0.6317,
      "step": 17
    },
    {
      "epoch": 0.04580152671755725,
      "grad_norm": 2.230231285095215,
      "learning_rate": 4.522542485937369e-05,
      "loss": 0.2857,
      "step": 18
    },
    {
      "epoch": 0.04834605597964377,
      "grad_norm": 1.5982710123062134,
      "learning_rate": 4.401014914000078e-05,
      "loss": 0.1697,
      "step": 19
    },
    {
      "epoch": 0.05089058524173028,
      "grad_norm": 1.402782917022705,
      "learning_rate": 4.267766952966369e-05,
      "loss": 0.2827,
      "step": 20
    },
    {
      "epoch": 0.05343511450381679,
      "grad_norm": 1.7604286670684814,
      "learning_rate": 4.123620120825459e-05,
      "loss": 0.1834,
      "step": 21
    },
    {
      "epoch": 0.05597964376590331,
      "grad_norm": 4.199867248535156,
      "learning_rate": 3.969463130731183e-05,
      "loss": 0.4652,
      "step": 22
    },
    {
      "epoch": 0.058524173027989825,
      "grad_norm": 1.1508660316467285,
      "learning_rate": 3.8062464117898724e-05,
      "loss": 0.1081,
      "step": 23
    },
    {
      "epoch": 0.061068702290076333,
      "grad_norm": 1.6623399257659912,
      "learning_rate": 3.634976249348867e-05,
      "loss": 0.1578,
      "step": 24
    },
    {
      "epoch": 0.06361323155216285,
      "grad_norm": 1.1933339834213257,
      "learning_rate": 3.456708580912725e-05,
      "loss": 0.1515,
      "step": 25
    },
    {
      "epoch": 0.06615776081424936,
      "grad_norm": 1.6038551330566406,
      "learning_rate": 3.272542485937369e-05,
      "loss": 0.2152,
      "step": 26
    },
    {
      "epoch": 0.06615776081424936,
      "eval_loss": 0.18286243081092834,
      "eval_runtime": 4.0988,
      "eval_samples_per_second": 40.499,
      "eval_steps_per_second": 20.25,
      "step": 26
    },
    {
      "epoch": 0.06870229007633588,
      "grad_norm": 1.0369246006011963,
      "learning_rate": 3.083613409639764e-05,
      "loss": 0.1151,
      "step": 27
    },
    {
      "epoch": 0.07124681933842239,
      "grad_norm": 1.37936270236969,
      "learning_rate": 2.8910861626005776e-05,
      "loss": 0.1626,
      "step": 28
    },
    {
      "epoch": 0.0737913486005089,
      "grad_norm": 0.9308303594589233,
      "learning_rate": 2.6961477393196126e-05,
      "loss": 0.1095,
      "step": 29
    },
    {
      "epoch": 0.07633587786259542,
      "grad_norm": 1.8044286966323853,
      "learning_rate": 2.5e-05,
      "loss": 0.0947,
      "step": 30
    },
    {
      "epoch": 0.07888040712468193,
      "grad_norm": 1.0868867635726929,
      "learning_rate": 2.303852260680388e-05,
      "loss": 0.2049,
      "step": 31
    },
    {
      "epoch": 0.08142493638676845,
      "grad_norm": 2.401944875717163,
      "learning_rate": 2.1089138373994223e-05,
      "loss": 0.2697,
      "step": 32
    },
    {
      "epoch": 0.08396946564885496,
      "grad_norm": 1.6872973442077637,
      "learning_rate": 1.9163865903602374e-05,
      "loss": 0.1731,
      "step": 33
    },
    {
      "epoch": 0.08651399491094147,
      "grad_norm": 1.331525206565857,
      "learning_rate": 1.7274575140626318e-05,
      "loss": 0.16,
      "step": 34
    },
    {
      "epoch": 0.089058524173028,
      "grad_norm": 2.928229570388794,
      "learning_rate": 1.5432914190872757e-05,
      "loss": 0.2078,
      "step": 35
    },
    {
      "epoch": 0.0916030534351145,
      "grad_norm": 1.3831185102462769,
      "learning_rate": 1.3650237506511331e-05,
      "loss": 0.0663,
      "step": 36
    },
    {
      "epoch": 0.09414758269720101,
      "grad_norm": 1.482015609741211,
      "learning_rate": 1.1937535882101281e-05,
      "loss": 0.1134,
      "step": 37
    },
    {
      "epoch": 0.09669211195928754,
      "grad_norm": 1.5263253450393677,
      "learning_rate": 1.0305368692688174e-05,
      "loss": 0.1705,
      "step": 38
    },
    {
      "epoch": 0.09923664122137404,
      "grad_norm": 1.535754680633545,
      "learning_rate": 8.763798791745411e-06,
      "loss": 0.1579,
      "step": 39
    },
    {
      "epoch": 0.09923664122137404,
      "eval_loss": 0.16544334590435028,
      "eval_runtime": 4.1198,
      "eval_samples_per_second": 40.293,
      "eval_steps_per_second": 20.147,
      "step": 39
    },
    {
      "epoch": 0.10178117048346055,
      "grad_norm": 1.3084092140197754,
      "learning_rate": 7.3223304703363135e-06,
      "loss": 0.1543,
      "step": 40
    },
    {
      "epoch": 0.10432569974554708,
      "grad_norm": 2.607229232788086,
      "learning_rate": 5.989850859999227e-06,
      "loss": 0.2427,
      "step": 41
    },
    {
      "epoch": 0.10687022900763359,
      "grad_norm": 2.31429123878479,
      "learning_rate": 4.7745751406263165e-06,
      "loss": 0.1742,
      "step": 42
    },
    {
      "epoch": 0.10941475826972011,
      "grad_norm": 2.2048234939575195,
      "learning_rate": 3.6839958911476957e-06,
      "loss": 0.1644,
      "step": 43
    },
    {
      "epoch": 0.11195928753180662,
      "grad_norm": 0.9953597784042358,
      "learning_rate": 2.7248368952908053e-06,
      "loss": 0.0656,
      "step": 44
    },
    {
      "epoch": 0.11450381679389313,
      "grad_norm": 1.6188597679138184,
      "learning_rate": 1.9030116872178316e-06,
      "loss": 0.155,
      "step": 45
    },
    {
      "epoch": 0.11704834605597965,
      "grad_norm": 1.3625197410583496,
      "learning_rate": 1.2235870926211619e-06,
      "loss": 0.1116,
      "step": 46
    },
    {
      "epoch": 0.11959287531806616,
      "grad_norm": 0.8615650534629822,
      "learning_rate": 6.907519900580861e-07,
      "loss": 0.038,
      "step": 47
    },
    {
      "epoch": 0.12213740458015267,
      "grad_norm": 1.747086763381958,
      "learning_rate": 3.077914851215585e-07,
      "loss": 0.3618,
      "step": 48
    },
    {
      "epoch": 0.12468193384223919,
      "grad_norm": 1.5428842306137085,
      "learning_rate": 7.706665667180091e-08,
      "loss": 0.1127,
      "step": 49
    },
    {
      "epoch": 0.1272264631043257,
      "grad_norm": 1.6774532794952393,
      "learning_rate": 0.0,
      "loss": 0.1539,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 13,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2500486653542400.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}