{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9990265609790016,
  "eval_steps": 200,
  "global_step": 898,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.001112501738283966,
      "grad_norm": 0.6684414708991117,
      "learning_rate": 2.2222222222222225e-06,
      "loss": 1.1751,
      "step": 1
    },
    {
      "epoch": 0.00556250869141983,
      "grad_norm": 0.7639609960034875,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 1.282,
      "step": 5
    },
    {
      "epoch": 0.01112501738283966,
      "grad_norm": 0.8970088039778137,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 1.276,
      "step": 10
    },
    {
      "epoch": 0.016687526074259492,
      "grad_norm": 0.5983155959810567,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 1.2166,
      "step": 15
    },
    {
      "epoch": 0.02225003476567932,
      "grad_norm": 0.9387996319771594,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 1.0526,
      "step": 20
    },
    {
      "epoch": 0.027812543457099152,
      "grad_norm": 0.3387344315023908,
      "learning_rate": 5.555555555555556e-05,
      "loss": 0.9608,
      "step": 25
    },
    {
      "epoch": 0.033375052148518984,
      "grad_norm": 0.23890923775202408,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.911,
      "step": 30
    },
    {
      "epoch": 0.03893756083993881,
      "grad_norm": 0.22798914076978022,
      "learning_rate": 7.777777777777778e-05,
      "loss": 0.8445,
      "step": 35
    },
    {
      "epoch": 0.04450006953135864,
      "grad_norm": 0.31587333267646156,
      "learning_rate": 8.888888888888889e-05,
      "loss": 0.8105,
      "step": 40
    },
    {
      "epoch": 0.05006257822277847,
      "grad_norm": 0.19505366669382362,
      "learning_rate": 0.0001,
      "loss": 0.8265,
      "step": 45
    },
    {
      "epoch": 0.055625086914198305,
      "grad_norm": 0.20253125415926881,
      "learning_rate": 0.00011111111111111112,
      "loss": 0.7973,
      "step": 50
    },
    {
      "epoch": 0.061187595605618136,
      "grad_norm": 0.19874800607368992,
      "learning_rate": 0.00012222222222222224,
      "loss": 0.7554,
      "step": 55
    },
    {
      "epoch": 0.06675010429703797,
      "grad_norm": 0.1603617233155014,
      "learning_rate": 0.00013333333333333334,
      "loss": 0.7737,
      "step": 60
    },
    {
      "epoch": 0.0723126129884578,
      "grad_norm": 0.182313382422268,
      "learning_rate": 0.00014444444444444444,
      "loss": 0.7744,
      "step": 65
    },
    {
      "epoch": 0.07787512167987762,
      "grad_norm": 0.1404552378108098,
      "learning_rate": 0.00015555555555555556,
      "loss": 0.7995,
      "step": 70
    },
    {
      "epoch": 0.08343763037129745,
      "grad_norm": 0.15330163912523168,
      "learning_rate": 0.0001666666666666667,
      "loss": 0.7854,
      "step": 75
    },
    {
      "epoch": 0.08900013906271728,
      "grad_norm": 0.15880144560375917,
      "learning_rate": 0.00017777777777777779,
      "loss": 0.7572,
      "step": 80
    },
    {
      "epoch": 0.09456264775413711,
      "grad_norm": 0.17526658243677723,
      "learning_rate": 0.00018888888888888888,
      "loss": 0.7421,
      "step": 85
    },
    {
      "epoch": 0.10012515644555695,
      "grad_norm": 0.1756854246552801,
      "learning_rate": 0.0002,
      "loss": 0.7569,
      "step": 90
    },
    {
      "epoch": 0.10568766513697678,
      "grad_norm": 0.13988463897936598,
      "learning_rate": 0.00019998110384864614,
      "loss": 0.7271,
      "step": 95
    },
    {
      "epoch": 0.11125017382839661,
      "grad_norm": 0.14983210481706255,
      "learning_rate": 0.0001999244225358753,
      "loss": 0.7444,
      "step": 100
    },
    {
      "epoch": 0.11681268251981644,
      "grad_norm": 0.14861727909266345,
      "learning_rate": 0.00019982997748286082,
      "loss": 0.7783,
      "step": 105
    },
    {
      "epoch": 0.12237519121123627,
      "grad_norm": 0.17979168387360328,
      "learning_rate": 0.00019969780438256293,
      "loss": 0.7651,
      "step": 110
    },
    {
      "epoch": 0.1279376999026561,
      "grad_norm": 0.16202080618298298,
      "learning_rate": 0.00019952795318623986,
      "loss": 0.719,
      "step": 115
    },
    {
      "epoch": 0.13350020859407594,
      "grad_norm": 0.13085394814942794,
      "learning_rate": 0.0001993204880845699,
      "loss": 0.7044,
      "step": 120
    },
    {
      "epoch": 0.13906271728549577,
      "grad_norm": 0.1369700622873931,
      "learning_rate": 0.00019907548748339222,
      "loss": 0.7166,
      "step": 125
    },
    {
      "epoch": 0.1446252259769156,
      "grad_norm": 0.12798471944493164,
      "learning_rate": 0.0001987930439740757,
      "loss": 0.7302,
      "step": 130
    },
    {
      "epoch": 0.15018773466833543,
      "grad_norm": 0.16062970178106098,
      "learning_rate": 0.0001984732642985263,
      "loss": 0.7477,
      "step": 135
    },
    {
      "epoch": 0.15575024335975524,
      "grad_norm": 0.12776472354551027,
      "learning_rate": 0.0001981162693088471,
      "loss": 0.7445,
      "step": 140
    },
    {
      "epoch": 0.16131275205117507,
      "grad_norm": 0.13554426042598045,
      "learning_rate": 0.00019772219392166519,
      "loss": 0.7252,
      "step": 145
    },
    {
      "epoch": 0.1668752607425949,
      "grad_norm": 0.13347482010697612,
      "learning_rate": 0.00019729118706714375,
      "loss": 0.7254,
      "step": 150
    },
    {
      "epoch": 0.17243776943401473,
      "grad_norm": 0.134631392605464,
      "learning_rate": 0.000196823411632698,
      "loss": 0.7184,
      "step": 155
    },
    {
      "epoch": 0.17800027812543456,
      "grad_norm": 0.1504769169328635,
      "learning_rate": 0.00019631904440143612,
      "loss": 0.7259,
      "step": 160
    },
    {
      "epoch": 0.1835627868168544,
      "grad_norm": 0.13782774235003004,
      "learning_rate": 0.00019577827598534885,
      "loss": 0.7367,
      "step": 165
    },
    {
      "epoch": 0.18912529550827423,
      "grad_norm": 0.1363344338061366,
      "learning_rate": 0.00019520131075327298,
      "loss": 0.722,
      "step": 170
    },
    {
      "epoch": 0.19468780419969406,
      "grad_norm": 0.14621350415121373,
      "learning_rate": 0.00019458836675365556,
      "loss": 0.73,
      "step": 175
    },
    {
      "epoch": 0.2002503128911139,
      "grad_norm": 0.15278741364826062,
      "learning_rate": 0.00019393967563214833,
      "loss": 0.6995,
      "step": 180
    },
    {
      "epoch": 0.20581282158253372,
      "grad_norm": 0.13331150727297583,
      "learning_rate": 0.00019325548254406352,
      "loss": 0.7487,
      "step": 185
    },
    {
      "epoch": 0.21137533027395355,
      "grad_norm": 0.1412877661682615,
      "learning_rate": 0.00019253604606172417,
      "loss": 0.7359,
      "step": 190
    },
    {
      "epoch": 0.2169378389653734,
      "grad_norm": 0.15015640302548008,
      "learning_rate": 0.0001917816380767434,
      "loss": 0.7014,
      "step": 195
    },
    {
      "epoch": 0.22250034765679322,
      "grad_norm": 0.13131810843452316,
      "learning_rate": 0.0001909925436972706,
      "loss": 0.7113,
      "step": 200
    },
    {
      "epoch": 0.22250034765679322,
      "eval_loss": 0.7031363844871521,
      "eval_runtime": 38.8569,
      "eval_samples_per_second": 10.474,
      "eval_steps_per_second": 0.669,
      "step": 200
    },
    {
      "epoch": 0.22806285634821305,
      "grad_norm": 0.14737211911767406,
      "learning_rate": 0.0001901690611402423,
      "loss": 0.7264,
      "step": 205
    },
    {
      "epoch": 0.23362536503963288,
      "grad_norm": 0.1357323208908195,
      "learning_rate": 0.00018931150161867916,
      "loss": 0.7366,
      "step": 210
    },
    {
      "epoch": 0.2391878737310527,
      "grad_norm": 0.13993345760253112,
      "learning_rate": 0.0001884201892240715,
      "loss": 0.7117,
      "step": 215
    },
    {
      "epoch": 0.24475038242247255,
      "grad_norm": 0.1369219623281007,
      "learning_rate": 0.00018749546080389757,
      "loss": 0.7197,
      "step": 220
    },
    {
      "epoch": 0.2503128911138924,
      "grad_norm": 0.15142549765891067,
      "learning_rate": 0.00018653766583432113,
      "loss": 0.7402,
      "step": 225
    },
    {
      "epoch": 0.2558753998053122,
      "grad_norm": 0.1268630478080886,
      "learning_rate": 0.0001855471662881164,
      "loss": 0.7061,
      "step": 230
    },
    {
      "epoch": 0.26143790849673204,
      "grad_norm": 0.15250219181797606,
      "learning_rate": 0.0001845243364978702,
      "loss": 0.6856,
      "step": 235
    },
    {
      "epoch": 0.2670004171881519,
      "grad_norm": 0.12578511590375505,
      "learning_rate": 0.00018346956301451304,
      "loss": 0.6933,
      "step": 240
    },
    {
      "epoch": 0.2725629258795717,
      "grad_norm": 0.12595811486114722,
      "learning_rate": 0.00018238324446123266,
      "loss": 0.7216,
      "step": 245
    },
    {
      "epoch": 0.27812543457099154,
      "grad_norm": 0.1372248567648991,
      "learning_rate": 0.00018126579138282503,
      "loss": 0.7092,
      "step": 250
    },
    {
      "epoch": 0.28368794326241137,
      "grad_norm": 0.1268675904703008,
      "learning_rate": 0.0001801176260905402,
      "loss": 0.6885,
      "step": 255
    },
    {
      "epoch": 0.2892504519538312,
      "grad_norm": 0.15050289967108632,
      "learning_rate": 0.00017893918250248104,
      "loss": 0.7095,
      "step": 260
    },
    {
      "epoch": 0.29481296064525103,
      "grad_norm": 0.13336034584149736,
      "learning_rate": 0.00017773090597961554,
      "loss": 0.6966,
      "step": 265
    },
    {
      "epoch": 0.30037546933667086,
      "grad_norm": 0.158284438999716,
      "learning_rate": 0.00017649325315746478,
      "loss": 0.7275,
      "step": 270
    },
    {
      "epoch": 0.3059379780280907,
      "grad_norm": 0.14579684984990443,
      "learning_rate": 0.00017522669177352977,
      "loss": 0.7206,
      "step": 275
    },
    {
      "epoch": 0.31150048671951047,
      "grad_norm": 0.12269074197800435,
      "learning_rate": 0.0001739317004905227,
      "loss": 0.697,
      "step": 280
    },
    {
      "epoch": 0.3170629954109303,
      "grad_norm": 0.13835017103100894,
      "learning_rate": 0.00017260876871546936,
      "loss": 0.7174,
      "step": 285
    },
    {
      "epoch": 0.32262550410235014,
      "grad_norm": 0.14300200863185467,
      "learning_rate": 0.00017125839641475072,
      "loss": 0.726,
      "step": 290
    },
    {
      "epoch": 0.32818801279376997,
      "grad_norm": 0.12914092926857734,
      "learning_rate": 0.0001698810939251543,
      "loss": 0.6991,
      "step": 295
    },
    {
      "epoch": 0.3337505214851898,
      "grad_norm": 0.1293105805311886,
      "learning_rate": 0.00016847738176100632,
      "loss": 0.7264,
      "step": 300
    },
    {
      "epoch": 0.33931303017660963,
      "grad_norm": 0.1263743857475521,
      "learning_rate": 0.00016704779041745686,
      "loss": 0.6938,
      "step": 305
    },
    {
      "epoch": 0.34487553886802946,
      "grad_norm": 0.16556508393117347,
      "learning_rate": 0.000165592860169994,
      "loss": 0.7251,
      "step": 310
    },
    {
      "epoch": 0.3504380475594493,
      "grad_norm": 0.13892185415083144,
      "learning_rate": 0.00016411314087026106,
      "loss": 0.7439,
      "step": 315
    },
    {
      "epoch": 0.3560005562508691,
      "grad_norm": 0.1427616131100252,
      "learning_rate": 0.00016260919173825508,
      "loss": 0.6905,
      "step": 320
    },
    {
      "epoch": 0.36156306494228896,
      "grad_norm": 0.1187614399483276,
      "learning_rate": 0.00016108158115098444,
      "loss": 0.681,
      "step": 325
    },
    {
      "epoch": 0.3671255736337088,
      "grad_norm": 0.1428657673910464,
      "learning_rate": 0.0001595308864276666,
      "loss": 0.705,
      "step": 330
    },
    {
      "epoch": 0.3726880823251286,
      "grad_norm": 0.14224088322242012,
      "learning_rate": 0.00015795769361154547,
      "loss": 0.7139,
      "step": 335
    },
    {
      "epoch": 0.37825059101654845,
      "grad_norm": 0.1456871215491553,
      "learning_rate": 0.00015636259724841222,
      "loss": 0.7292,
      "step": 340
    },
    {
      "epoch": 0.3838130997079683,
      "grad_norm": 0.13983468342540284,
      "learning_rate": 0.00015474620016191294,
      "loss": 0.7222,
      "step": 345
    },
    {
      "epoch": 0.3893756083993881,
      "grad_norm": 0.14315721096194434,
      "learning_rate": 0.00015310911322572753,
      "loss": 0.7112,
      "step": 350
    },
    {
      "epoch": 0.39493811709080795,
      "grad_norm": 0.12717703522604246,
      "learning_rate": 0.00015145195513270644,
      "loss": 0.7018,
      "step": 355
    },
    {
      "epoch": 0.4005006257822278,
      "grad_norm": 0.1405738893906113,
      "learning_rate": 0.0001497753521610526,
      "loss": 0.708,
      "step": 360
    },
    {
      "epoch": 0.4060631344736476,
      "grad_norm": 0.12649904699680478,
      "learning_rate": 0.00014807993793763619,
      "loss": 0.6728,
      "step": 365
    },
    {
      "epoch": 0.41162564316506745,
      "grad_norm": 0.1366397886559032,
      "learning_rate": 0.00014636635319853275,
      "loss": 0.6855,
      "step": 370
    },
    {
      "epoch": 0.4171881518564873,
      "grad_norm": 0.1348575434185503,
      "learning_rate": 0.00014463524554687399,
      "loss": 0.6969,
      "step": 375
    },
    {
      "epoch": 0.4227506605479071,
      "grad_norm": 0.13467406560845768,
      "learning_rate": 0.0001428872692081038,
      "loss": 0.7179,
      "step": 380
    },
    {
      "epoch": 0.42831316923932694,
      "grad_norm": 0.13474764353512392,
      "learning_rate": 0.00014112308478273145,
      "loss": 0.6853,
      "step": 385
    },
    {
      "epoch": 0.4338756779307468,
      "grad_norm": 0.12908327147450624,
      "learning_rate": 0.00013934335899667527,
      "loss": 0.7131,
      "step": 390
    },
    {
      "epoch": 0.4394381866221666,
      "grad_norm": 0.12215743992138174,
      "learning_rate": 0.00013754876444929166,
      "loss": 0.6748,
      "step": 395
    },
    {
      "epoch": 0.44500069531358644,
      "grad_norm": 0.12646747341185763,
      "learning_rate": 0.0001357399793591844,
      "loss": 0.7181,
      "step": 400
    },
    {
      "epoch": 0.44500069531358644,
      "eval_loss": 0.6872018575668335,
      "eval_runtime": 38.1286,
      "eval_samples_per_second": 10.674,
      "eval_steps_per_second": 0.682,
      "step": 400
    },
    {
      "epoch": 0.45056320400500627,
      "grad_norm": 0.13122165542663003,
      "learning_rate": 0.00013391768730789002,
      "loss": 0.7121,
      "step": 405
    },
    {
      "epoch": 0.4561257126964261,
      "grad_norm": 0.12302514176584399,
      "learning_rate": 0.00013208257698153677,
      "loss": 0.7122,
      "step": 410
    },
    {
      "epoch": 0.46168822138784593,
      "grad_norm": 0.14440204630512954,
      "learning_rate": 0.00013023534191057426,
      "loss": 0.7239,
      "step": 415
    },
    {
      "epoch": 0.46725073007926576,
      "grad_norm": 0.13547988803582495,
      "learning_rate": 0.0001283766802076722,
      "loss": 0.7206,
      "step": 420
    },
    {
      "epoch": 0.4728132387706856,
      "grad_norm": 0.13999737919216407,
      "learning_rate": 0.00012650729430388764,
      "loss": 0.6826,
      "step": 425
    },
    {
      "epoch": 0.4783757474621054,
      "grad_norm": 0.13048583251685064,
      "learning_rate": 0.00012462789068320017,
      "loss": 0.6926,
      "step": 430
    },
    {
      "epoch": 0.48393825615352526,
      "grad_norm": 0.1496684308590174,
      "learning_rate": 0.00012273917961551513,
      "loss": 0.7342,
      "step": 435
    },
    {
      "epoch": 0.4895007648449451,
      "grad_norm": 0.14327740803897993,
      "learning_rate": 0.00012084187488823657,
      "loss": 0.6966,
      "step": 440
    },
    {
      "epoch": 0.4950632735363649,
      "grad_norm": 0.14722640475177795,
      "learning_rate": 0.00011893669353651031,
      "loss": 0.7045,
      "step": 445
    },
    {
      "epoch": 0.5006257822277848,
      "grad_norm": 0.13182498566362877,
      "learning_rate": 0.00011702435557223987,
      "loss": 0.7205,
      "step": 450
    },
    {
      "epoch": 0.5061882909192046,
      "grad_norm": 0.14649854622775277,
      "learning_rate": 0.00011510558371197753,
      "loss": 0.6972,
      "step": 455
    },
    {
      "epoch": 0.5117507996106244,
      "grad_norm": 0.14603995632853023,
      "learning_rate": 0.00011318110310379301,
      "loss": 0.7455,
      "step": 460
    },
    {
      "epoch": 0.5173133083020443,
      "grad_norm": 0.1413104318676706,
      "learning_rate": 0.0001112516410532233,
      "loss": 0.6917,
      "step": 465
    },
    {
      "epoch": 0.5228758169934641,
      "grad_norm": 0.1319527122221744,
      "learning_rate": 0.00010931792674840718,
      "loss": 0.6855,
      "step": 470
    },
    {
      "epoch": 0.5284383256848839,
      "grad_norm": 0.1245675456499167,
      "learning_rate": 0.0001073806909845082,
      "loss": 0.7028,
      "step": 475
    },
    {
      "epoch": 0.5340008343763037,
      "grad_norm": 0.11698601668007995,
      "learning_rate": 0.00010544066588753044,
      "loss": 0.6933,
      "step": 480
    },
    {
      "epoch": 0.5395633430677236,
      "grad_norm": 0.12677292396669446,
      "learning_rate": 0.00010349858463763113,
      "loss": 0.683,
      "step": 485
    },
    {
      "epoch": 0.5451258517591434,
      "grad_norm": 0.13456238683420088,
      "learning_rate": 0.0001015551811920351,
      "loss": 0.7006,
      "step": 490
    },
    {
      "epoch": 0.5506883604505632,
      "grad_norm": 0.15227986749806005,
      "learning_rate": 9.961119000765531e-05,
      "loss": 0.6894,
      "step": 495
    },
    {
      "epoch": 0.5562508691419831,
      "grad_norm": 0.12847199230412348,
      "learning_rate": 9.766734576352478e-05,
      "loss": 0.6964,
      "step": 500
    },
    {
      "epoch": 0.5618133778334029,
      "grad_norm": 0.13064877508080042,
      "learning_rate": 9.572438308314446e-05,
      "loss": 0.6885,
      "step": 505
    },
    {
      "epoch": 0.5673758865248227,
      "grad_norm": 0.1372527364951275,
      "learning_rate": 9.378303625685195e-05,
      "loss": 0.7056,
      "step": 510
    },
    {
      "epoch": 0.5729383952162426,
      "grad_norm": 0.13318320240144188,
      "learning_rate": 9.18440389643165e-05,
      "loss": 0.6792,
      "step": 515
    },
    {
      "epoch": 0.5785009039076624,
      "grad_norm": 0.1389287540275015,
      "learning_rate": 8.990812399726435e-05,
      "loss": 0.6955,
      "step": 520
    },
    {
      "epoch": 0.5840634125990822,
      "grad_norm": 0.15589038738911026,
      "learning_rate": 8.797602298254004e-05,
      "loss": 0.6995,
      "step": 525
    },
    {
      "epoch": 0.5896259212905021,
      "grad_norm": 0.13296983699316012,
      "learning_rate": 8.604846610560771e-05,
      "loss": 0.6707,
      "step": 530
    },
    {
      "epoch": 0.5951884299819219,
      "grad_norm": 0.12114356841208347,
      "learning_rate": 8.412618183459708e-05,
      "loss": 0.7015,
      "step": 535
    },
    {
      "epoch": 0.6007509386733417,
      "grad_norm": 0.12351709388346521,
      "learning_rate": 8.220989664499878e-05,
      "loss": 0.7006,
      "step": 540
    },
    {
      "epoch": 0.6063134473647616,
      "grad_norm": 0.1233117181776912,
      "learning_rate": 8.030033474511249e-05,
      "loss": 0.7061,
      "step": 545
    },
    {
      "epoch": 0.6118759560561814,
      "grad_norm": 0.14966743246815337,
      "learning_rate": 7.839821780235168e-05,
      "loss": 0.7383,
      "step": 550
    },
    {
      "epoch": 0.6174384647476012,
      "grad_norm": 0.13689976425741557,
      "learning_rate": 7.650426467050926e-05,
      "loss": 0.7083,
      "step": 555
    },
    {
      "epoch": 0.6230009734390209,
      "grad_norm": 0.1540376322278511,
      "learning_rate": 7.461919111808595e-05,
      "loss": 0.6991,
      "step": 560
    },
    {
      "epoch": 0.6285634821304408,
      "grad_norm": 0.15818277915085305,
      "learning_rate": 7.274370955778498e-05,
      "loss": 0.7147,
      "step": 565
    },
    {
      "epoch": 0.6341259908218606,
      "grad_norm": 0.13733946492240742,
      "learning_rate": 7.087852877727481e-05,
      "loss": 0.7043,
      "step": 570
    },
    {
      "epoch": 0.6396884995132804,
      "grad_norm": 0.12905771101166952,
      "learning_rate": 6.902435367132208e-05,
      "loss": 0.6952,
      "step": 575
    },
    {
      "epoch": 0.6452510082047003,
      "grad_norm": 0.14917864740623707,
      "learning_rate": 6.718188497539554e-05,
      "loss": 0.6894,
      "step": 580
    },
    {
      "epoch": 0.6508135168961201,
      "grad_norm": 0.17034610159423622,
      "learning_rate": 6.535181900084206e-05,
      "loss": 0.6906,
      "step": 585
    },
    {
      "epoch": 0.6563760255875399,
      "grad_norm": 0.14577692793179953,
      "learning_rate": 6.35348473717345e-05,
      "loss": 0.6822,
      "step": 590
    },
    {
      "epoch": 0.6619385342789598,
      "grad_norm": 0.1400663188779511,
      "learning_rate": 6.173165676349103e-05,
      "loss": 0.702,
      "step": 595
    },
    {
      "epoch": 0.6675010429703796,
      "grad_norm": 0.13477519960817833,
      "learning_rate": 5.9942928643364724e-05,
      "loss": 0.7171,
      "step": 600
    },
    {
      "epoch": 0.6675010429703796,
      "eval_loss": 0.6785927414894104,
      "eval_runtime": 38.1279,
      "eval_samples_per_second": 10.675,
      "eval_steps_per_second": 0.682,
      "step": 600
    },
    {
      "epoch": 0.6730635516617994,
      "grad_norm": 0.13058646321017944,
      "learning_rate": 5.816933901290136e-05,
      "loss": 0.6959,
      "step": 605
    },
    {
      "epoch": 0.6786260603532193,
      "grad_norm": 0.14039443258781562,
      "learning_rate": 5.6411558152462894e-05,
      "loss": 0.6991,
      "step": 610
    },
    {
      "epoch": 0.6841885690446391,
      "grad_norm": 0.1435099198472092,
      "learning_rate": 5.4670250367913023e-05,
      "loss": 0.7008,
      "step": 615
    },
    {
      "epoch": 0.6897510777360589,
      "grad_norm": 0.1251912516827599,
      "learning_rate": 5.2946073739560706e-05,
      "loss": 0.6828,
      "step": 620
    },
    {
      "epoch": 0.6953135864274788,
      "grad_norm": 0.12907022711465035,
      "learning_rate": 5.1239679873456634e-05,
      "loss": 0.6811,
      "step": 625
    },
    {
      "epoch": 0.7008760951188986,
      "grad_norm": 0.13237387347735238,
      "learning_rate": 4.955171365513603e-05,
      "loss": 0.7235,
      "step": 630
    },
    {
      "epoch": 0.7064386038103184,
      "grad_norm": 0.12376618467656812,
      "learning_rate": 4.7882813005901696e-05,
      "loss": 0.687,
      "step": 635
    },
    {
      "epoch": 0.7120011125017383,
      "grad_norm": 0.1456941018180003,
      "learning_rate": 4.623360864173893e-05,
      "loss": 0.6813,
      "step": 640
    },
    {
      "epoch": 0.7175636211931581,
      "grad_norm": 0.17032008806579718,
      "learning_rate": 4.460472383495331e-05,
      "loss": 0.6795,
      "step": 645
    },
    {
      "epoch": 0.7231261298845779,
      "grad_norm": 0.12579166198761085,
      "learning_rate": 4.2996774178621736e-05,
      "loss": 0.6835,
      "step": 650
    },
    {
      "epoch": 0.7286886385759977,
      "grad_norm": 0.14450683290791613,
      "learning_rate": 4.141036735394574e-05,
      "loss": 0.6862,
      "step": 655
    },
    {
      "epoch": 0.7342511472674176,
      "grad_norm": 0.17119446089706108,
      "learning_rate": 3.984610290059467e-05,
      "loss": 0.7112,
      "step": 660
    },
    {
      "epoch": 0.7398136559588374,
      "grad_norm": 0.14950992219628134,
      "learning_rate": 3.830457199012585e-05,
      "loss": 0.7259,
      "step": 665
    },
    {
      "epoch": 0.7453761646502572,
      "grad_norm": 0.14485285236277412,
      "learning_rate": 3.678635720256737e-05,
      "loss": 0.6944,
      "step": 670
    },
    {
      "epoch": 0.7509386733416771,
      "grad_norm": 0.13749155498971605,
      "learning_rate": 3.529203230624747e-05,
      "loss": 0.6758,
      "step": 675
    },
    {
      "epoch": 0.7565011820330969,
      "grad_norm": 0.12869094791923574,
      "learning_rate": 3.3822162040954354e-05,
      "loss": 0.701,
      "step": 680
    },
    {
      "epoch": 0.7620636907245167,
      "grad_norm": 0.12906896257797407,
      "learning_rate": 3.237730190450816e-05,
      "loss": 0.7059,
      "step": 685
    },
    {
      "epoch": 0.7676261994159366,
      "grad_norm": 0.15164612968237504,
      "learning_rate": 3.0957997942825336e-05,
      "loss": 0.6925,
      "step": 690
    },
    {
      "epoch": 0.7731887081073564,
      "grad_norm": 0.15092417362430902,
      "learning_rate": 2.9564786543555388e-05,
      "loss": 0.704,
      "step": 695
    },
    {
      "epoch": 0.7787512167987762,
      "grad_norm": 0.12028807778273494,
      "learning_rate": 2.819819423336775e-05,
      "loss": 0.6615,
      "step": 700
    },
    {
      "epoch": 0.7843137254901961,
      "grad_norm": 0.11668516511159772,
      "learning_rate": 2.6858737478965035e-05,
      "loss": 0.6761,
      "step": 705
    },
    {
      "epoch": 0.7898762341816159,
      "grad_norm": 0.12013736025525155,
      "learning_rate": 2.5546922491898495e-05,
      "loss": 0.6812,
      "step": 710
    },
    {
      "epoch": 0.7954387428730357,
      "grad_norm": 0.11406481330250699,
      "learning_rate": 2.4263245037258995e-05,
      "loss": 0.6653,
      "step": 715
    },
    {
      "epoch": 0.8010012515644556,
      "grad_norm": 0.14003374126540685,
      "learning_rate": 2.300819024631603e-05,
      "loss": 0.7079,
      "step": 720
    },
    {
      "epoch": 0.8065637602558754,
      "grad_norm": 0.14609209804734563,
      "learning_rate": 2.178223243317532e-05,
      "loss": 0.7126,
      "step": 725
    },
    {
      "epoch": 0.8121262689472952,
      "grad_norm": 0.1412723606655406,
      "learning_rate": 2.058583491552465e-05,
      "loss": 0.6805,
      "step": 730
    },
    {
      "epoch": 0.8176887776387151,
      "grad_norm": 0.13559476815952162,
      "learning_rate": 1.941944983953552e-05,
      "loss": 0.6687,
      "step": 735
    },
    {
      "epoch": 0.8232512863301349,
      "grad_norm": 0.1212687591445283,
      "learning_rate": 1.8283518008986567e-05,
      "loss": 0.6949,
      "step": 740
    },
    {
      "epoch": 0.8288137950215547,
      "grad_norm": 0.13418537722622348,
      "learning_rate": 1.7178468718673714e-05,
      "loss": 0.6947,
      "step": 745
    },
    {
      "epoch": 0.8343763037129746,
      "grad_norm": 0.14626468131094617,
      "learning_rate": 1.6104719592169902e-05,
      "loss": 0.6974,
      "step": 750
    },
    {
      "epoch": 0.8399388124043944,
      "grad_norm": 0.1505968834590085,
      "learning_rate": 1.5062676423995247e-05,
      "loss": 0.6699,
      "step": 755
    },
    {
      "epoch": 0.8455013210958142,
      "grad_norm": 0.13840692663705956,
      "learning_rate": 1.4052733026258281e-05,
      "loss": 0.6775,
      "step": 760
    },
    {
      "epoch": 0.851063829787234,
      "grad_norm": 0.15338747206444608,
      "learning_rate": 1.3075271079825036e-05,
      "loss": 0.6855,
      "step": 765
    },
    {
      "epoch": 0.8566263384786539,
      "grad_norm": 0.15087298168068788,
      "learning_rate": 1.2130659990073146e-05,
      "loss": 0.7074,
      "step": 770
    },
    {
      "epoch": 0.8621888471700737,
      "grad_norm": 0.15260125382082407,
      "learning_rate": 1.1219256747285045e-05,
      "loss": 0.6559,
      "step": 775
    },
    {
      "epoch": 0.8677513558614935,
      "grad_norm": 0.13676984708600298,
      "learning_rate": 1.0341405791733183e-05,
      "loss": 0.7018,
      "step": 780
    },
    {
      "epoch": 0.8733138645529134,
      "grad_norm": 0.12792881953620902,
      "learning_rate": 9.49743888350798e-06,
      "loss": 0.6758,
      "step": 785
    },
    {
      "epoch": 0.8788763732443332,
      "grad_norm": 0.13024278184248392,
      "learning_rate": 8.687674977138116e-06,
      "loss": 0.703,
      "step": 790
    },
    {
      "epoch": 0.884438881935753,
      "grad_norm": 0.15819074719833112,
      "learning_rate": 7.912420101050367e-06,
      "loss": 0.7026,
      "step": 795
    },
    {
      "epoch": 0.8900013906271729,
      "grad_norm": 0.1363306447853743,
      "learning_rate": 7.171967241914224e-06,
      "loss": 0.7031,
      "step": 800
    },
    {
      "epoch": 0.8900013906271729,
      "eval_loss": 0.6755391955375671,
      "eval_runtime": 38.1439,
      "eval_samples_per_second": 10.67,
      "eval_steps_per_second": 0.682,
      "step": 800
    },
    {
      "epoch": 0.8955638993185927,
      "grad_norm": 0.11599580684218645,
      "learning_rate": 6.4665962339156005e-06,
      "loss": 0.6559,
      "step": 805
    },
    {
      "epoch": 0.9011264080100125,
      "grad_norm": 0.14301150354946243,
      "learning_rate": 5.7965736530010916e-06,
      "loss": 0.7436,
      "step": 810
    },
    {
      "epoch": 0.9066889167014324,
      "grad_norm": 0.1454357862013425,
      "learning_rate": 5.162152716132662e-06,
      "loss": 0.6851,
      "step": 815
    },
    {
      "epoch": 0.9122514253928522,
      "grad_norm": 0.1296885702439703,
      "learning_rate": 4.563573185591219e-06,
      "loss": 0.6799,
      "step": 820
    },
    {
      "epoch": 0.917813934084272,
      "grad_norm": 0.13649350003605498,
      "learning_rate": 4.0010612783648925e-06,
      "loss": 0.6954,
      "step": 825
    },
    {
      "epoch": 0.9233764427756919,
      "grad_norm": 0.11566680964002608,
      "learning_rate": 3.4748295806564356e-06,
      "loss": 0.6931,
      "step": 830
    },
    {
      "epoch": 0.9289389514671117,
      "grad_norm": 0.1376951533333156,
      "learning_rate": 2.9850769675419774e-06,
      "loss": 0.7094,
      "step": 835
    },
    {
      "epoch": 0.9345014601585315,
      "grad_norm": 0.13815883208860893,
      "learning_rate": 2.5319885278115906e-06,
      "loss": 0.6814,
      "step": 840
    },
    {
      "epoch": 0.9400639688499514,
      "grad_norm": 0.12898446369151903,
      "learning_rate": 2.115735494019966e-06,
      "loss": 0.7252,
      "step": 845
    },
    {
      "epoch": 0.9456264775413712,
      "grad_norm": 0.14801072441410335,
      "learning_rate": 1.7364751777736332e-06,
      "loss": 0.6701,
      "step": 850
    },
    {
      "epoch": 0.951188986232791,
      "grad_norm": 0.1489167549426156,
      "learning_rate": 1.394350910279385e-06,
      "loss": 0.6852,
      "step": 855
    },
    {
      "epoch": 0.9567514949242109,
      "grad_norm": 0.150107558255733,
      "learning_rate": 1.089491988176017e-06,
      "loss": 0.7177,
      "step": 860
    },
    {
      "epoch": 0.9623140036156307,
      "grad_norm": 0.1462660934588179,
      "learning_rate": 8.220136246701926e-07,
      "loss": 0.6665,
      "step": 865
    },
    {
      "epoch": 0.9678765123070505,
      "grad_norm": 0.13216262862731915,
      "learning_rate": 5.920169059947411e-07,
      "loss": 0.666,
      "step": 870
    },
    {
      "epoch": 0.9734390209984704,
      "grad_norm": 0.1497443816075182,
      "learning_rate": 3.9958875320580404e-07,
      "loss": 0.6902,
      "step": 875
    },
    {
      "epoch": 0.9790015296898902,
      "grad_norm": 0.1329859440152703,
      "learning_rate": 2.448018893333681e-07,
      "loss": 0.6959,
      "step": 880
    },
    {
      "epoch": 0.98456403838131,
      "grad_norm": 0.14839381526763562,
      "learning_rate": 1.277148118975835e-07,
      "loss": 0.6884,
      "step": 885
    },
    {
      "epoch": 0.9901265470727298,
      "grad_norm": 0.14357565386768453,
      "learning_rate": 4.837177080119215e-08,
      "loss": 0.6961,
      "step": 890
    },
    {
      "epoch": 0.9956890557641497,
      "grad_norm": 0.12423764687193309,
      "learning_rate": 6.8027516064606e-09,
      "loss": 0.6802,
      "step": 895
    },
    {
      "epoch": 0.9990265609790016,
      "step": 898,
      "total_flos": 3899966689378304.0,
      "train_loss": 0.7223505903458542,
      "train_runtime": 18859.9671,
      "train_samples_per_second": 3.05,
      "train_steps_per_second": 0.048
    }
  ],
  "logging_steps": 5,
  "max_steps": 898,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 3899966689378304.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}