|
{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.6920415224913494,
  "eval_steps": 500,
  "global_step": 800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00865051903114187,
      "grad_norm": 0.653178334236145,
      "learning_rate": 5.142857142857143e-05,
      "loss": 2.462,
      "mean_token_accuracy": 0.5231182813644409,
      "num_tokens": 20480.0,
      "step": 10
    },
    {
      "epoch": 0.01730103806228374,
      "grad_norm": 0.4405156075954437,
      "learning_rate": 0.00010857142857142856,
      "loss": 2.0497,
      "mean_token_accuracy": 0.5873411521315575,
      "num_tokens": 40960.0,
      "step": 20
    },
    {
      "epoch": 0.025951557093425604,
      "grad_norm": 0.6160047054290771,
      "learning_rate": 0.00016571428571428575,
      "loss": 1.9284,
      "mean_token_accuracy": 0.6067448765039444,
      "num_tokens": 61440.0,
      "step": 30
    },
    {
      "epoch": 0.03460207612456748,
      "grad_norm": 0.4909583330154419,
      "learning_rate": 0.00019999371690018224,
      "loss": 1.7739,
      "mean_token_accuracy": 0.6309384137392045,
      "num_tokens": 81920.0,
      "step": 40
    },
    {
      "epoch": 0.04325259515570934,
      "grad_norm": 0.4441400170326233,
      "learning_rate": 0.00019992304109437157,
      "loss": 1.9242,
      "mean_token_accuracy": 0.6014662817120552,
      "num_tokens": 102400.0,
      "step": 50
    },
    {
      "epoch": 0.05190311418685121,
      "grad_norm": 0.5243574976921082,
      "learning_rate": 0.00019977389129787062,
      "loss": 1.8928,
      "mean_token_accuracy": 0.6027370542287827,
      "num_tokens": 122880.0,
      "step": 60
    },
    {
      "epoch": 0.06055363321799308,
      "grad_norm": 0.3737685978412628,
      "learning_rate": 0.00019954638464462175,
      "loss": 1.8107,
      "mean_token_accuracy": 0.6185728281736373,
      "num_tokens": 143360.0,
      "step": 70
    },
    {
      "epoch": 0.06920415224913495,
      "grad_norm": 0.5457460284233093,
      "learning_rate": 0.00019924069980567822,
      "loss": 1.7227,
      "mean_token_accuracy": 0.6354838699102402,
      "num_tokens": 163840.0,
      "step": 80
    },
    {
      "epoch": 0.07785467128027682,
      "grad_norm": 0.41756778955459595,
      "learning_rate": 0.00019885707684888566,
      "loss": 1.7249,
      "mean_token_accuracy": 0.6385141760110855,
      "num_tokens": 184320.0,
      "step": 90
    },
    {
      "epoch": 0.08650519031141868,
      "grad_norm": 0.39207473397254944,
      "learning_rate": 0.00019839581705034624,
      "loss": 1.6423,
      "mean_token_accuracy": 0.6423753708600998,
      "num_tokens": 204800.0,
      "step": 100
    },
    {
      "epoch": 0.09515570934256055,
      "grad_norm": 0.46543020009994507,
      "learning_rate": 0.00019785728265781324,
      "loss": 1.5637,
      "mean_token_accuracy": 0.6588465303182602,
      "num_tokens": 225280.0,
      "step": 110
    },
    {
      "epoch": 0.10380622837370242,
      "grad_norm": 0.4604319930076599,
      "learning_rate": 0.0001972418966062018,
      "loss": 1.5936,
      "mean_token_accuracy": 0.6586021468043327,
      "num_tokens": 245760.0,
      "step": 120
    },
    {
      "epoch": 0.11245674740484429,
      "grad_norm": 0.34409117698669434,
      "learning_rate": 0.0001965501421854394,
      "loss": 1.7365,
      "mean_token_accuracy": 0.6300097703933716,
      "num_tokens": 266240.0,
      "step": 130
    },
    {
      "epoch": 0.12110726643598616,
      "grad_norm": 0.46972155570983887,
      "learning_rate": 0.0001957825626609169,
      "loss": 1.6823,
      "mean_token_accuracy": 0.6419354841113091,
      "num_tokens": 286720.0,
      "step": 140
    },
    {
      "epoch": 0.12975778546712802,
      "grad_norm": 0.45500946044921875,
      "learning_rate": 0.00019493976084683813,
      "loss": 1.4338,
      "mean_token_accuracy": 0.6832355856895447,
      "num_tokens": 307200.0,
      "step": 150
    },
    {
      "epoch": 0.1384083044982699,
      "grad_norm": 0.4167431890964508,
      "learning_rate": 0.0001940223986328032,
      "loss": 1.7327,
      "mean_token_accuracy": 0.6318181827664375,
      "num_tokens": 327680.0,
      "step": 160
    },
    {
      "epoch": 0.14705882352941177,
      "grad_norm": 0.43620193004608154,
      "learning_rate": 0.00019303119646399728,
      "loss": 1.5997,
      "mean_token_accuracy": 0.6583577707409859,
      "num_tokens": 348160.0,
      "step": 170
    },
    {
      "epoch": 0.15570934256055363,
      "grad_norm": 0.48302027583122253,
      "learning_rate": 0.00019196693277539307,
      "loss": 1.79,
      "mean_token_accuracy": 0.617741933465004,
      "num_tokens": 368640.0,
      "step": 180
    },
    {
      "epoch": 0.1643598615916955,
      "grad_norm": 0.49784767627716064,
      "learning_rate": 0.00019083044338041134,
      "loss": 1.5624,
      "mean_token_accuracy": 0.6568426251411438,
      "num_tokens": 389120.0,
      "step": 190
    },
    {
      "epoch": 0.17301038062283736,
      "grad_norm": 0.3815184235572815,
      "learning_rate": 0.00018962262081451966,
      "loss": 1.5723,
      "mean_token_accuracy": 0.6639296174049377,
      "num_tokens": 409600.0,
      "step": 200
    },
    {
      "epoch": 0.18166089965397925,
      "grad_norm": 0.43787622451782227,
      "learning_rate": 0.00018834441363428463,
      "loss": 1.7278,
      "mean_token_accuracy": 0.6321114301681519,
      "num_tokens": 430080.0,
      "step": 210
    },
    {
      "epoch": 0.1903114186851211,
      "grad_norm": 0.4128075838088989,
      "learning_rate": 0.00018699682567242863,
      "loss": 1.6181,
      "mean_token_accuracy": 0.6524437993764878,
      "num_tokens": 450560.0,
      "step": 220
    },
    {
      "epoch": 0.19896193771626297,
      "grad_norm": 0.35929927229881287,
      "learning_rate": 0.00018558091524947524,
      "loss": 1.6799,
      "mean_token_accuracy": 0.6450635358691216,
      "num_tokens": 471040.0,
      "step": 230
    },
    {
      "epoch": 0.20761245674740483,
      "grad_norm": 0.4328981637954712,
      "learning_rate": 0.00018409779434260342,
      "loss": 1.2708,
      "mean_token_accuracy": 0.7201857253909111,
      "num_tokens": 491520.0,
      "step": 240
    },
    {
      "epoch": 0.21626297577854672,
      "grad_norm": 0.4529803991317749,
      "learning_rate": 0.00018254862771236257,
      "loss": 1.718,
      "mean_token_accuracy": 0.6376832827925683,
      "num_tokens": 512000.0,
      "step": 250
    },
    {
      "epoch": 0.22491349480968859,
      "grad_norm": 0.39349377155303955,
      "learning_rate": 0.00018093463198793432,
      "loss": 1.6715,
      "mean_token_accuracy": 0.6447702810168267,
      "num_tokens": 532480.0,
      "step": 260
    },
    {
      "epoch": 0.23356401384083045,
      "grad_norm": 0.3997224271297455,
      "learning_rate": 0.00017925707471165992,
      "loss": 1.5151,
      "mean_token_accuracy": 0.6776148617267609,
      "num_tokens": 552960.0,
      "step": 270
    },
    {
      "epoch": 0.2422145328719723,
      "grad_norm": 0.4060993492603302,
      "learning_rate": 0.00017751727334358307,
      "loss": 1.5702,
      "mean_token_accuracy": 0.6609481915831565,
      "num_tokens": 573440.0,
      "step": 280
    },
    {
      "epoch": 0.2508650519031142,
      "grad_norm": 0.369098424911499,
      "learning_rate": 0.00017571659422679003,
      "loss": 1.3934,
      "mean_token_accuracy": 0.6983382269740105,
      "num_tokens": 593920.0,
      "step": 290
    },
    {
      "epoch": 0.25951557093425603,
      "grad_norm": 0.4169941842556,
      "learning_rate": 0.00017385645151436,
      "loss": 1.5608,
      "mean_token_accuracy": 0.6624144673347473,
      "num_tokens": 614400.0,
      "step": 300
    },
    {
      "epoch": 0.2681660899653979,
      "grad_norm": 0.44405296444892883,
      "learning_rate": 0.00017193830605876804,
      "loss": 1.6689,
      "mean_token_accuracy": 0.6325513184070587,
      "num_tokens": 634880.0,
      "step": 310
    },
    {
      "epoch": 0.2768166089965398,
      "grad_norm": 0.4252229630947113,
      "learning_rate": 0.0001699636642646129,
      "loss": 1.4817,
      "mean_token_accuracy": 0.6717986300587654,
      "num_tokens": 655360.0,
      "step": 320
    },
    {
      "epoch": 0.28546712802768165,
      "grad_norm": 0.4406476318836212,
      "learning_rate": 0.00016793407690557076,
      "loss": 1.6879,
      "mean_token_accuracy": 0.640762460231781,
      "num_tokens": 675840.0,
      "step": 330
    },
    {
      "epoch": 0.29411764705882354,
      "grad_norm": 0.42295652627944946,
      "learning_rate": 0.00016585113790650388,
      "loss": 1.5637,
      "mean_token_accuracy": 0.6610459357500076,
      "num_tokens": 696320.0,
      "step": 340
    },
    {
      "epoch": 0.3027681660899654,
      "grad_norm": 0.4155745208263397,
      "learning_rate": 0.00016371648309168097,
      "loss": 1.7478,
      "mean_token_accuracy": 0.6266862124204635,
      "num_tokens": 716800.0,
      "step": 350
    },
    {
      "epoch": 0.31141868512110726,
      "grad_norm": 0.3854524791240692,
      "learning_rate": 0.0001615317889000918,
      "loss": 1.3819,
      "mean_token_accuracy": 0.6973118335008621,
      "num_tokens": 737280.0,
      "step": 360
    },
    {
      "epoch": 0.32006920415224915,
      "grad_norm": 0.4296940565109253,
      "learning_rate": 0.00015929877106886535,
      "loss": 1.5729,
      "mean_token_accuracy": 0.6544476956129074,
      "num_tokens": 757760.0,
      "step": 370
    },
    {
      "epoch": 0.328719723183391,
      "grad_norm": 0.5267869830131531,
      "learning_rate": 0.0001570191832858256,
      "loss": 1.5866,
      "mean_token_accuracy": 0.6566959977149963,
      "num_tokens": 778240.0,
      "step": 380
    },
    {
      "epoch": 0.3373702422145329,
      "grad_norm": 0.4024481177330017,
      "learning_rate": 0.00015469481581224272,
      "loss": 1.5171,
      "mean_token_accuracy": 0.67121212631464,
      "num_tokens": 798720.0,
      "step": 390
    },
    {
      "epoch": 0.3460207612456747,
      "grad_norm": 0.48981329798698425,
      "learning_rate": 0.0001523274940768614,
      "loss": 1.6027,
      "mean_token_accuracy": 0.6437927693128586,
      "num_tokens": 819200.0,
      "step": 400
    },
    {
      "epoch": 0.3546712802768166,
      "grad_norm": 0.39708444476127625,
      "learning_rate": 0.00014991907724231122,
      "loss": 1.7412,
      "mean_token_accuracy": 0.6313782975077629,
      "num_tokens": 839680.0,
      "step": 410
    },
    {
      "epoch": 0.3633217993079585,
      "grad_norm": 0.3914325535297394,
      "learning_rate": 0.0001474714567450234,
      "loss": 1.445,
      "mean_token_accuracy": 0.6826490700244904,
      "num_tokens": 860160.0,
      "step": 420
    },
    {
      "epoch": 0.3719723183391003,
      "grad_norm": 0.43571996688842773,
      "learning_rate": 0.00014498655480980226,
      "loss": 1.4578,
      "mean_token_accuracy": 0.6875855386257171,
      "num_tokens": 880640.0,
      "step": 430
    },
    {
      "epoch": 0.3806228373702422,
      "grad_norm": 0.37934359908103943,
      "learning_rate": 0.00014246632294021705,
      "loss": 1.332,
      "mean_token_accuracy": 0.7121700882911682,
      "num_tokens": 901120.0,
      "step": 440
    },
    {
      "epoch": 0.3892733564013841,
      "grad_norm": 0.4126007854938507,
      "learning_rate": 0.00013991274038599927,
      "loss": 1.6188,
      "mean_token_accuracy": 0.6448191583156586,
      "num_tokens": 921600.0,
      "step": 450
    },
    {
      "epoch": 0.39792387543252594,
      "grad_norm": 0.3285645544528961,
      "learning_rate": 0.00013732781258865066,
      "loss": 1.399,
      "mean_token_accuracy": 0.695943309366703,
      "num_tokens": 942080.0,
      "step": 460
    },
    {
      "epoch": 0.40657439446366783,
      "grad_norm": 0.48543041944503784,
      "learning_rate": 0.00013471356960648092,
      "loss": 1.693,
      "mean_token_accuracy": 0.6381231769919395,
      "num_tokens": 962560.0,
      "step": 470
    },
    {
      "epoch": 0.41522491349480967,
      "grad_norm": 0.28301766514778137,
      "learning_rate": 0.00013207206452031303,
      "loss": 1.3507,
      "mean_token_accuracy": 0.7064516082406044,
      "num_tokens": 983040.0,
      "step": 480
    },
    {
      "epoch": 0.42387543252595156,
      "grad_norm": 0.44277122616767883,
      "learning_rate": 0.00012940537182110797,
      "loss": 1.6078,
      "mean_token_accuracy": 0.6580645143985748,
      "num_tokens": 1003520.0,
      "step": 490
    },
    {
      "epoch": 0.43252595155709345,
      "grad_norm": 0.4079855978488922,
      "learning_rate": 0.00012671558578077497,
      "loss": 1.7974,
      "mean_token_accuracy": 0.6107038110494614,
      "num_tokens": 1024000.0,
      "step": 500
    },
    {
      "epoch": 0.4411764705882353,
      "grad_norm": 0.46380290389060974,
      "learning_rate": 0.00012400481880744704,
      "loss": 1.3914,
      "mean_token_accuracy": 0.6991202354431152,
      "num_tokens": 1044480.0,
      "step": 510
    },
    {
      "epoch": 0.44982698961937717,
      "grad_norm": 0.5048119425773621,
      "learning_rate": 0.00012127519978651344,
      "loss": 1.5477,
      "mean_token_accuracy": 0.6672043025493621,
      "num_tokens": 1064960.0,
      "step": 520
    },
    {
      "epoch": 0.458477508650519,
      "grad_norm": 0.386057585477829,
      "learning_rate": 0.00011852887240871145,
      "loss": 1.6146,
      "mean_token_accuracy": 0.645259040594101,
      "num_tokens": 1085440.0,
      "step": 530
    },
    {
      "epoch": 0.4671280276816609,
      "grad_norm": 0.42855167388916016,
      "learning_rate": 0.00011576799348659118,
      "loss": 1.1688,
      "mean_token_accuracy": 0.7390029281377792,
      "num_tokens": 1105920.0,
      "step": 540
    },
    {
      "epoch": 0.4757785467128028,
      "grad_norm": 0.6189476251602173,
      "learning_rate": 0.00011299473126067508,
      "loss": 1.4494,
      "mean_token_accuracy": 0.6824944317340851,
      "num_tokens": 1126335.0,
      "step": 550
    },
    {
      "epoch": 0.4844290657439446,
      "grad_norm": 0.3980766236782074,
      "learning_rate": 0.00011021126369664275,
      "loss": 1.4861,
      "mean_token_accuracy": 0.6763929545879364,
      "num_tokens": 1146815.0,
      "step": 560
    },
    {
      "epoch": 0.4930795847750865,
      "grad_norm": 0.453605979681015,
      "learning_rate": 0.00010741977677487777,
      "loss": 1.4266,
      "mean_token_accuracy": 0.6879765376448631,
      "num_tokens": 1167295.0,
      "step": 570
    },
    {
      "epoch": 0.5017301038062284,
      "grad_norm": 0.49681079387664795,
      "learning_rate": 0.00010462246277372071,
      "loss": 1.6789,
      "mean_token_accuracy": 0.638269791007042,
      "num_tokens": 1187775.0,
      "step": 580
    },
    {
      "epoch": 0.5103806228373703,
      "grad_norm": 0.35436248779296875,
      "learning_rate": 0.00010182151854777568,
      "loss": 1.6145,
      "mean_token_accuracy": 0.6567448630928994,
      "num_tokens": 1208255.0,
      "step": 590
    },
    {
      "epoch": 0.5190311418685121,
      "grad_norm": 0.4127649664878845,
      "learning_rate": 9.901914380262314e-05,
      "loss": 1.6327,
      "mean_token_accuracy": 0.6463831856846809,
      "num_tokens": 1228735.0,
      "step": 600
    },
    {
      "epoch": 0.527681660899654,
      "grad_norm": 0.4014521837234497,
      "learning_rate": 9.621753936729359e-05,
      "loss": 1.6727,
      "mean_token_accuracy": 0.6405180916190147,
      "num_tokens": 1249215.0,
      "step": 610
    },
    {
      "epoch": 0.5363321799307958,
      "grad_norm": 0.2921870946884155,
      "learning_rate": 9.341890546585923e-05,
      "loss": 1.3427,
      "mean_token_accuracy": 0.7034213155508041,
      "num_tokens": 1269695.0,
      "step": 620
    },
    {
      "epoch": 0.5449826989619377,
      "grad_norm": 0.40779879689216614,
      "learning_rate": 9.062543998950026e-05,
      "loss": 1.6133,
      "mean_token_accuracy": 0.6533724322915078,
      "num_tokens": 1290175.0,
      "step": 630
    },
    {
      "epoch": 0.5536332179930796,
      "grad_norm": 0.49637043476104736,
      "learning_rate": 8.783933677040384e-05,
      "loss": 1.5598,
      "mean_token_accuracy": 0.6616813331842423,
      "num_tokens": 1310655.0,
      "step": 640
    },
    {
      "epoch": 0.5622837370242214,
      "grad_norm": 0.5044928193092346,
      "learning_rate": 8.50627838588502e-05,
      "loss": 1.3112,
      "mean_token_accuracy": 0.7180351942777634,
      "num_tokens": 1331135.0,
      "step": 650
    },
    {
      "epoch": 0.5709342560553633,
      "grad_norm": 0.2617112994194031,
      "learning_rate": 8.229796180484019e-05,
      "loss": 1.3561,
      "mean_token_accuracy": 0.6991202354431152,
      "num_tokens": 1351615.0,
      "step": 660
    },
    {
      "epoch": 0.5795847750865052,
      "grad_norm": 0.40844273567199707,
      "learning_rate": 7.954704194561235e-05,
      "loss": 1.3602,
      "mean_token_accuracy": 0.7007820129394531,
      "num_tokens": 1372095.0,
      "step": 670
    },
    {
      "epoch": 0.5882352941176471,
      "grad_norm": 0.4878994822502136,
      "learning_rate": 7.681218470039598e-05,
      "loss": 1.6032,
      "mean_token_accuracy": 0.6508797645568848,
      "num_tokens": 1392575.0,
      "step": 680
    },
    {
      "epoch": 0.596885813148789,
      "grad_norm": 0.42092955112457275,
      "learning_rate": 7.409553787373795e-05,
      "loss": 1.4468,
      "mean_token_accuracy": 0.6830400794744491,
      "num_tokens": 1413055.0,
      "step": 690
    },
    {
      "epoch": 0.6055363321799307,
      "grad_norm": 0.3226411044597626,
      "learning_rate": 7.13992349687367e-05,
      "loss": 1.4495,
      "mean_token_accuracy": 0.6836265861988068,
      "num_tokens": 1433535.0,
      "step": 700
    },
    {
      "epoch": 0.6141868512110726,
      "grad_norm": 0.4431033432483673,
      "learning_rate": 6.872539351150785e-05,
      "loss": 1.5614,
      "mean_token_accuracy": 0.6589931562542916,
      "num_tokens": 1454015.0,
      "step": 710
    },
    {
      "epoch": 0.6228373702422145,
      "grad_norm": 0.479476660490036,
      "learning_rate": 6.607611338819697e-05,
      "loss": 1.6109,
      "mean_token_accuracy": 0.6578690126538277,
      "num_tokens": 1474495.0,
      "step": 720
    },
    {
      "epoch": 0.6314878892733564,
      "grad_norm": 0.38295578956604004,
      "learning_rate": 6.345347519584616e-05,
      "loss": 1.4551,
      "mean_token_accuracy": 0.6815249294042587,
      "num_tokens": 1494975.0,
      "step": 730
    },
    {
      "epoch": 0.6401384083044983,
      "grad_norm": 0.49733808636665344,
      "learning_rate": 6.085953860840877e-05,
      "loss": 1.3829,
      "mean_token_accuracy": 0.696089930832386,
      "num_tokens": 1515455.0,
      "step": 740
    },
    {
      "epoch": 0.6487889273356401,
      "grad_norm": 0.44113320112228394,
      "learning_rate": 5.829634075919652e-05,
      "loss": 1.6253,
      "mean_token_accuracy": 0.6462854355573654,
      "num_tokens": 1535935.0,
      "step": 750
    },
    {
      "epoch": 0.657439446366782,
      "grad_norm": 0.32002171874046326,
      "learning_rate": 5.5765894641028196e-05,
      "loss": 1.2533,
      "mean_token_accuracy": 0.7238514199852943,
      "num_tokens": 1556415.0,
      "step": 760
    },
    {
      "epoch": 0.6660899653979239,
      "grad_norm": 0.5036414861679077,
      "learning_rate": 5.327018752533737e-05,
      "loss": 1.4791,
      "mean_token_accuracy": 0.6784946173429489,
      "num_tokens": 1576895.0,
      "step": 770
    },
    {
      "epoch": 0.6747404844290658,
      "grad_norm": 0.465110182762146,
      "learning_rate": 5.0811179401479903e-05,
      "loss": 1.4898,
      "mean_token_accuracy": 0.6763440907001496,
      "num_tokens": 1597375.0,
      "step": 780
    },
    {
      "epoch": 0.6833910034602076,
      "grad_norm": 0.4566653072834015,
      "learning_rate": 4.8390801437467927e-05,
      "loss": 1.5825,
      "mean_token_accuracy": 0.6620723336935044,
      "num_tokens": 1617855.0,
      "step": 790
    },
    {
      "epoch": 0.6920415224913494,
      "grad_norm": 0.45508384704589844,
      "learning_rate": 4.601095446333741e-05,
      "loss": 1.4375,
      "mean_token_accuracy": 0.6909090906381607,
      "num_tokens": 1638335.0,
      "step": 800
    }
  ],
  "logging_steps": 10,
  "max_steps": 1156,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.794732049845248e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
|
|