{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.3675203858964052,
  "eval_steps": 50,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00918800964741013,
      "grad_norm": 0.036612071096897125,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 15.01579761505127,
      "logits/rejected": 15.359031677246094,
      "logps/chosen": -0.2681262791156769,
      "logps/rejected": -0.31947994232177734,
      "loss": 0.9551,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.40218934416770935,
      "rewards/margins": 0.07703053951263428,
      "rewards/rejected": -0.479219913482666,
      "step": 10
    },
    {
      "epoch": 0.01837601929482026,
      "grad_norm": 0.05575725808739662,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 14.570712089538574,
      "logits/rejected": 15.321355819702148,
      "logps/chosen": -0.2867889404296875,
      "logps/rejected": -0.3514837622642517,
      "loss": 0.923,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.43018341064453125,
      "rewards/margins": 0.09704220294952393,
      "rewards/rejected": -0.5272256135940552,
      "step": 20
    },
    {
      "epoch": 0.02756402894223039,
      "grad_norm": 0.0492466576397419,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 14.748420715332031,
      "logits/rejected": 14.969354629516602,
      "logps/chosen": -0.28405922651290894,
      "logps/rejected": -0.32855403423309326,
      "loss": 0.9357,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.426088809967041,
      "rewards/margins": 0.06674225628376007,
      "rewards/rejected": -0.4928310811519623,
      "step": 30
    },
    {
      "epoch": 0.03675203858964052,
      "grad_norm": 0.05719422921538353,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 14.28278923034668,
      "logits/rejected": 14.76964282989502,
      "logps/chosen": -0.27940627932548523,
      "logps/rejected": -0.3408831059932709,
      "loss": 0.9215,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.41910940408706665,
      "rewards/margins": 0.09221524000167847,
      "rewards/rejected": -0.5113246440887451,
      "step": 40
    },
    {
      "epoch": 0.04594004823705065,
      "grad_norm": 0.06247895210981369,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 14.943578720092773,
      "logits/rejected": 14.936178207397461,
      "logps/chosen": -0.2819541394710541,
      "logps/rejected": -0.3245392441749573,
      "loss": 0.9464,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.4229312539100647,
      "rewards/margins": 0.06387762725353241,
      "rewards/rejected": -0.4868088662624359,
      "step": 50
    },
    {
      "epoch": 0.04594004823705065,
      "eval_logits/chosen": 14.7594575881958,
      "eval_logits/rejected": 15.193694114685059,
      "eval_logps/chosen": -0.2807807922363281,
      "eval_logps/rejected": -0.36209535598754883,
      "eval_loss": 0.9397181868553162,
      "eval_rewards/accuracies": 0.5681818127632141,
      "eval_rewards/chosen": -0.4211711883544922,
      "eval_rewards/margins": 0.12197184562683105,
      "eval_rewards/rejected": -0.5431429743766785,
      "eval_runtime": 24.9762,
      "eval_samples_per_second": 28.187,
      "eval_steps_per_second": 3.523,
      "step": 50
    },
    {
      "epoch": 0.05512805788446078,
      "grad_norm": 0.11519577354192734,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": 14.996228218078613,
      "logits/rejected": 15.37781810760498,
      "logps/chosen": -0.2809831202030182,
      "logps/rejected": -0.35486167669296265,
      "loss": 0.9318,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.4214746952056885,
      "rewards/margins": 0.1108178049325943,
      "rewards/rejected": -0.5322924852371216,
      "step": 60
    },
    {
      "epoch": 0.06431606753187091,
      "grad_norm": 0.06691388040781021,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": 14.612454414367676,
      "logits/rejected": 15.678136825561523,
      "logps/chosen": -0.2569667100906372,
      "logps/rejected": -0.40047627687454224,
      "loss": 0.9158,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.3854501247406006,
      "rewards/margins": 0.21526429057121277,
      "rewards/rejected": -0.600714385509491,
      "step": 70
    },
    {
      "epoch": 0.07350407717928104,
      "grad_norm": 0.05976058170199394,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": 14.873895645141602,
      "logits/rejected": 15.50474739074707,
      "logps/chosen": -0.28742527961730957,
      "logps/rejected": -0.37555089592933655,
      "loss": 0.9372,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.43113788962364197,
      "rewards/margins": 0.13218846917152405,
      "rewards/rejected": -0.5633264183998108,
      "step": 80
    },
    {
      "epoch": 0.08269208682669117,
      "grad_norm": 0.0602131113409996,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": 14.356691360473633,
      "logits/rejected": 14.895658493041992,
      "logps/chosen": -0.2613506317138672,
      "logps/rejected": -0.3317110538482666,
      "loss": 0.9324,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.3920259475708008,
      "rewards/margins": 0.10554064810276031,
      "rewards/rejected": -0.4975665509700775,
      "step": 90
    },
    {
      "epoch": 0.0918800964741013,
      "grad_norm": 0.07126503437757492,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": 14.862826347351074,
      "logits/rejected": 15.257089614868164,
      "logps/chosen": -0.2707213759422302,
      "logps/rejected": -0.3511395752429962,
      "loss": 0.9353,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.4060820937156677,
      "rewards/margins": 0.1206272691488266,
      "rewards/rejected": -0.5267094373703003,
      "step": 100
    },
    {
      "epoch": 0.0918800964741013,
      "eval_logits/chosen": 14.664334297180176,
      "eval_logits/rejected": 15.113536834716797,
      "eval_logps/chosen": -0.2750833034515381,
      "eval_logps/rejected": -0.36540210247039795,
      "eval_loss": 0.9324077367782593,
      "eval_rewards/accuracies": 0.5795454382896423,
      "eval_rewards/chosen": -0.41262495517730713,
      "eval_rewards/margins": 0.1354781985282898,
      "eval_rewards/rejected": -0.5481031537055969,
      "eval_runtime": 24.4286,
      "eval_samples_per_second": 28.819,
      "eval_steps_per_second": 3.602,
      "step": 100
    },
    {
      "epoch": 0.10106810612151143,
      "grad_norm": 0.07136944681406021,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": 14.942098617553711,
      "logits/rejected": 15.138586044311523,
      "logps/chosen": -0.2860812246799469,
      "logps/rejected": -0.36259371042251587,
      "loss": 0.934,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.42912182211875916,
      "rewards/margins": 0.11476878076791763,
      "rewards/rejected": -0.5438905954360962,
      "step": 110
    },
    {
      "epoch": 0.11025611576892155,
      "grad_norm": 0.07038908451795578,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": 14.488851547241211,
      "logits/rejected": 14.702054023742676,
      "logps/chosen": -0.2662215232849121,
      "logps/rejected": -0.3013685941696167,
      "loss": 0.9202,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.39933228492736816,
      "rewards/margins": 0.05272058770060539,
      "rewards/rejected": -0.45205289125442505,
      "step": 120
    },
    {
      "epoch": 0.11944412541633169,
      "grad_norm": 0.06875801086425781,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": 14.075657844543457,
      "logits/rejected": 14.696513175964355,
      "logps/chosen": -0.250360369682312,
      "logps/rejected": -0.3504650592803955,
      "loss": 0.9266,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.375540554523468,
      "rewards/margins": 0.15015706419944763,
      "rewards/rejected": -0.5256975889205933,
      "step": 130
    },
    {
      "epoch": 0.12863213506374183,
      "grad_norm": 0.0984601378440857,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": 13.738212585449219,
      "logits/rejected": 14.311574935913086,
      "logps/chosen": -0.26711025834083557,
      "logps/rejected": -0.3587702810764313,
      "loss": 0.9185,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.40066537261009216,
      "rewards/margins": 0.13749003410339355,
      "rewards/rejected": -0.5381554365158081,
      "step": 140
    },
    {
      "epoch": 0.13782014471115195,
      "grad_norm": 0.10201425850391388,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": 13.7462797164917,
      "logits/rejected": 14.230626106262207,
      "logps/chosen": -0.25559619069099426,
      "logps/rejected": -0.3708702623844147,
      "loss": 0.9106,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.3833943009376526,
      "rewards/margins": 0.17291104793548584,
      "rewards/rejected": -0.5563054084777832,
      "step": 150
    },
    {
      "epoch": 0.13782014471115195,
      "eval_logits/chosen": 13.458538055419922,
      "eval_logits/rejected": 13.998083114624023,
      "eval_logps/chosen": -0.2759075462818146,
      "eval_logps/rejected": -0.3873325288295746,
      "eval_loss": 0.9164085388183594,
      "eval_rewards/accuracies": 0.5795454382896423,
      "eval_rewards/chosen": -0.41386130452156067,
      "eval_rewards/margins": 0.1671374887228012,
      "eval_rewards/rejected": -0.5809988379478455,
      "eval_runtime": 24.4393,
      "eval_samples_per_second": 28.806,
      "eval_steps_per_second": 3.601,
      "step": 150
    },
    {
      "epoch": 0.14700815435856207,
      "grad_norm": 0.11537656933069229,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": 12.686149597167969,
      "logits/rejected": 13.478736877441406,
      "logps/chosen": -0.23941929638385773,
      "logps/rejected": -0.3713286519050598,
      "loss": 0.9094,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.3591288924217224,
      "rewards/margins": 0.1978640854358673,
      "rewards/rejected": -0.5569929480552673,
      "step": 160
    },
    {
      "epoch": 0.1561961640059722,
      "grad_norm": 0.1196313351392746,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": 13.221656799316406,
      "logits/rejected": 13.317082405090332,
      "logps/chosen": -0.3033878207206726,
      "logps/rejected": -0.3784424960613251,
      "loss": 0.9057,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.4550817608833313,
      "rewards/margins": 0.11258199065923691,
      "rewards/rejected": -0.5676637887954712,
      "step": 170
    },
    {
      "epoch": 0.16538417365338234,
      "grad_norm": 0.18745549023151398,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": 11.797627449035645,
      "logits/rejected": 12.031414985656738,
      "logps/chosen": -0.2746419608592987,
      "logps/rejected": -0.3629845976829529,
      "loss": 0.8954,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.41196292638778687,
      "rewards/margins": 0.13251398503780365,
      "rewards/rejected": -0.5444768667221069,
      "step": 180
    },
    {
      "epoch": 0.17457218330079247,
      "grad_norm": 0.1806156188249588,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": 10.275301933288574,
      "logits/rejected": 10.937273025512695,
      "logps/chosen": -0.2880379557609558,
      "logps/rejected": -0.4154580533504486,
      "loss": 0.8875,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.43205690383911133,
      "rewards/margins": 0.19113019108772278,
      "rewards/rejected": -0.6231871247291565,
      "step": 190
    },
    {
      "epoch": 0.1837601929482026,
      "grad_norm": 0.1839464157819748,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": 10.020039558410645,
      "logits/rejected": 10.66059398651123,
      "logps/chosen": -0.3136019706726074,
      "logps/rejected": -0.4385503828525543,
      "loss": 0.8647,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.47040295600891113,
      "rewards/margins": 0.18742261826992035,
      "rewards/rejected": -0.6578255891799927,
      "step": 200
    },
    {
      "epoch": 0.1837601929482026,
      "eval_logits/chosen": 9.442557334899902,
      "eval_logits/rejected": 10.053345680236816,
      "eval_logps/chosen": -0.3080674409866333,
      "eval_logps/rejected": -0.4899139702320099,
      "eval_loss": 0.8702690005302429,
      "eval_rewards/accuracies": 0.6931818127632141,
      "eval_rewards/chosen": -0.46210116147994995,
      "eval_rewards/margins": 0.27276986837387085,
      "eval_rewards/rejected": -0.7348710894584656,
      "eval_runtime": 24.4185,
      "eval_samples_per_second": 28.831,
      "eval_steps_per_second": 3.604,
      "step": 200
    },
    {
      "epoch": 0.19294820259561274,
      "grad_norm": 0.269613116979599,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": 7.941342353820801,
      "logits/rejected": 8.542920112609863,
      "logps/chosen": -0.3083941638469696,
      "logps/rejected": -0.5024437308311462,
      "loss": 0.8471,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.4625912606716156,
      "rewards/margins": 0.29107433557510376,
      "rewards/rejected": -0.7536656856536865,
      "step": 210
    },
    {
      "epoch": 0.20213621224302286,
      "grad_norm": 0.2640094459056854,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": 7.587499141693115,
      "logits/rejected": 7.592519283294678,
      "logps/chosen": -0.3381899893283844,
      "logps/rejected": -0.48494213819503784,
      "loss": 0.8427,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.5072849988937378,
      "rewards/margins": 0.22012826800346375,
      "rewards/rejected": -0.7274132966995239,
      "step": 220
    },
    {
      "epoch": 0.21132422189043298,
      "grad_norm": 0.29708293080329895,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": 6.250656604766846,
      "logits/rejected": 6.7652716636657715,
      "logps/chosen": -0.3644888997077942,
      "logps/rejected": -0.5470594167709351,
      "loss": 0.8201,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.5467333793640137,
      "rewards/margins": 0.2738557755947113,
      "rewards/rejected": -0.8205891847610474,
      "step": 230
    },
    {
      "epoch": 0.2205122315378431,
      "grad_norm": 0.35299497842788696,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": 4.6331706047058105,
      "logits/rejected": 4.710076332092285,
      "logps/chosen": -0.3634452223777771,
      "logps/rejected": -0.7193974256515503,
      "loss": 0.7877,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.545167863368988,
      "rewards/margins": 0.5339283347129822,
      "rewards/rejected": -1.0790963172912598,
      "step": 240
    },
    {
      "epoch": 0.22970024118525326,
      "grad_norm": 0.4265730082988739,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": 4.992984771728516,
      "logits/rejected": 4.606354713439941,
      "logps/chosen": -0.413116455078125,
      "logps/rejected": -0.7104976177215576,
      "loss": 0.7902,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.6196746826171875,
      "rewards/margins": 0.4460717737674713,
      "rewards/rejected": -1.0657463073730469,
      "step": 250
    },
    {
      "epoch": 0.22970024118525326,
      "eval_logits/chosen": 4.127804279327393,
      "eval_logits/rejected": 3.742251396179199,
      "eval_logps/chosen": -0.420327365398407,
      "eval_logps/rejected": -0.7902651429176331,
      "eval_loss": 0.7682384252548218,
      "eval_rewards/accuracies": 0.7159090638160706,
      "eval_rewards/chosen": -0.6304910182952881,
      "eval_rewards/margins": 0.5549066662788391,
      "eval_rewards/rejected": -1.185397744178772,
      "eval_runtime": 24.4318,
      "eval_samples_per_second": 28.815,
      "eval_steps_per_second": 3.602,
      "step": 250
    },
    {
      "epoch": 0.23888825083266338,
      "grad_norm": 0.7236106395721436,
      "learning_rate": 4.638410650401267e-06,
      "logits/chosen": 2.454423427581787,
      "logits/rejected": 1.816563367843628,
      "logps/chosen": -0.4492695927619934,
      "logps/rejected": -0.8738088607788086,
      "loss": 0.6911,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.6739044189453125,
      "rewards/margins": 0.6368088126182556,
      "rewards/rejected": -1.3107131719589233,
      "step": 260
    },
    {
      "epoch": 0.2480762604800735,
      "grad_norm": 0.5856125950813293,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": 3.2105612754821777,
      "logits/rejected": 2.5531132221221924,
      "logps/chosen": -0.537078320980072,
      "logps/rejected": -1.2025481462478638,
      "loss": 0.6774,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.80561763048172,
      "rewards/margins": 0.9982045888900757,
      "rewards/rejected": -1.8038222789764404,
      "step": 270
    },
    {
      "epoch": 0.25726427012748365,
      "grad_norm": 0.7396731972694397,
      "learning_rate": 4.582303101775249e-06,
      "logits/chosen": 2.0327231884002686,
      "logits/rejected": 1.4601097106933594,
      "logps/chosen": -0.47658151388168335,
      "logps/rejected": -1.3808696269989014,
      "loss": 0.628,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.7148722410202026,
      "rewards/margins": 1.3564319610595703,
      "rewards/rejected": -2.0713045597076416,
      "step": 280
    },
    {
      "epoch": 0.2664522797748938,
      "grad_norm": 2.412203550338745,
      "learning_rate": 4.55287302283426e-06,
      "logits/chosen": 1.2262591123580933,
      "logits/rejected": 0.22599482536315918,
      "logps/chosen": -0.5671601891517639,
      "logps/rejected": -1.6760343313217163,
      "loss": 0.5988,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.8507402539253235,
      "rewards/margins": 1.6633113622665405,
      "rewards/rejected": -2.5140514373779297,
      "step": 290
    },
    {
      "epoch": 0.2756402894223039,
      "grad_norm": 1.0477895736694336,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": 1.9952911138534546,
      "logits/rejected": 1.1298446655273438,
      "logps/chosen": -0.6152974367141724,
      "logps/rejected": -2.128481388092041,
      "loss": 0.5927,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.9229460954666138,
      "rewards/margins": 2.2697763442993164,
      "rewards/rejected": -3.192722797393799,
      "step": 300
    },
    {
      "epoch": 0.2756402894223039,
      "eval_logits/chosen": 1.6342910528182983,
      "eval_logits/rejected": 0.633538007736206,
      "eval_logps/chosen": -0.606099545955658,
      "eval_logps/rejected": -1.882785439491272,
      "eval_loss": 0.5978505611419678,
      "eval_rewards/accuracies": 0.7159090638160706,
      "eval_rewards/chosen": -0.909149169921875,
      "eval_rewards/margins": 1.9150291681289673,
      "eval_rewards/rejected": -2.824178457260132,
      "eval_runtime": 24.4299,
      "eval_samples_per_second": 28.817,
      "eval_steps_per_second": 3.602,
      "step": 300
    },
    {
      "epoch": 0.284828299069714,
      "grad_norm": 3.0767388343811035,
      "learning_rate": 4.491324795060491e-06,
      "logits/chosen": 1.0374902486801147,
      "logits/rejected": 0.6220051646232605,
      "logps/chosen": -0.6778531074523926,
      "logps/rejected": -1.8290255069732666,
      "loss": 0.5792,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -1.0167796611785889,
      "rewards/margins": 1.726758599281311,
      "rewards/rejected": -2.7435383796691895,
      "step": 310
    },
    {
      "epoch": 0.29401630871712414,
      "grad_norm": 0.6015120148658752,
      "learning_rate": 4.4592336433146e-06,
      "logits/chosen": 1.0050956010818481,
      "logits/rejected": -0.016118621453642845,
      "logps/chosen": -0.6865260004997253,
      "logps/rejected": -2.113417148590088,
      "loss": 0.5384,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.0297890901565552,
      "rewards/margins": 2.140336513519287,
      "rewards/rejected": -3.1701254844665527,
      "step": 320
    },
    {
      "epoch": 0.30320431836453426,
      "grad_norm": 0.7415631413459778,
      "learning_rate": 4.426283106939474e-06,
      "logits/chosen": 0.5536144971847534,
      "logits/rejected": -0.1644023358821869,
      "logps/chosen": -0.8181726336479187,
      "logps/rejected": -2.581185817718506,
      "loss": 0.5671,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.2272589206695557,
      "rewards/margins": 2.644519805908203,
      "rewards/rejected": -3.871778964996338,
      "step": 330
    },
    {
      "epoch": 0.3123923280119444,
      "grad_norm": 0.8956871628761292,
      "learning_rate": 4.3924876391293915e-06,
      "logits/chosen": 1.6062015295028687,
      "logits/rejected": 0.9243733286857605,
      "logps/chosen": -0.8991573452949524,
      "logps/rejected": -2.935060977935791,
      "loss": 0.5124,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -1.348736047744751,
      "rewards/margins": 3.0538551807403564,
      "rewards/rejected": -4.402591228485107,
      "step": 340
    },
    {
      "epoch": 0.32158033765935456,
      "grad_norm": 1.1822264194488525,
      "learning_rate": 4.357862063693486e-06,
      "logits/chosen": 1.8171085119247437,
      "logits/rejected": 1.228049397468567,
      "logps/chosen": -0.8822734951972961,
      "logps/rejected": -2.4744174480438232,
      "loss": 0.516,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -1.3234103918075562,
      "rewards/margins": 2.388216018676758,
      "rewards/rejected": -3.7116265296936035,
      "step": 350
    },
    {
      "epoch": 0.32158033765935456,
      "eval_logits/chosen": 1.1368330717086792,
      "eval_logits/rejected": 0.2795785665512085,
      "eval_logps/chosen": -0.9485350251197815,
      "eval_logps/rejected": -2.6484899520874023,
      "eval_loss": 0.5133901238441467,
      "eval_rewards/accuracies": 0.7727272510528564,
      "eval_rewards/chosen": -1.4228025674819946,
      "eval_rewards/margins": 2.5499324798583984,
      "eval_rewards/rejected": -3.9727354049682617,
      "eval_runtime": 24.4277,
      "eval_samples_per_second": 28.82,
      "eval_steps_per_second": 3.602,
      "step": 350
    },
    {
      "epoch": 0.3307683473067647,
      "grad_norm": 2.5278775691986084,
      "learning_rate": 4.322421568553529e-06,
      "logits/chosen": 1.1921958923339844,
      "logits/rejected": 0.7565670013427734,
      "logps/chosen": -1.4180412292480469,
      "logps/rejected": -3.0890870094299316,
      "loss": 0.4811,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -2.1270618438720703,
      "rewards/margins": 2.5065689086914062,
      "rewards/rejected": -4.633630752563477,
      "step": 360
    },
    {
      "epoch": 0.3399563569541748,
      "grad_norm": 1.7325788736343384,
      "learning_rate": 4.286181699082008e-06,
      "logits/chosen": 0.997096836566925,
      "logits/rejected": 0.4399908483028412,
      "logps/chosen": -1.9010308980941772,
      "logps/rejected": -3.6025185585021973,
      "loss": 0.4326,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -2.851546287536621,
      "rewards/margins": 2.552231550216675,
      "rewards/rejected": -5.403778076171875,
      "step": 370
    },
    {
      "epoch": 0.34914436660158493,
      "grad_norm": 4.608370304107666,
      "learning_rate": 4.249158351283414e-06,
      "logits/chosen": 1.0715999603271484,
      "logits/rejected": 0.6113725900650024,
      "logps/chosen": -2.4032845497131348,
      "logps/rejected": -3.9940528869628906,
      "loss": 0.4027,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -3.6049270629882812,
      "rewards/margins": 2.3861520290374756,
      "rewards/rejected": -5.991078853607178,
      "step": 380
    },
    {
      "epoch": 0.35833237624899505,
      "grad_norm": 4.600816249847412,
      "learning_rate": 4.211367764821722e-06,
      "logits/chosen": 1.4443682432174683,
      "logits/rejected": 0.8617011904716492,
      "logps/chosen": -2.349093198776245,
      "logps/rejected": -3.9943203926086426,
      "loss": 0.4118,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -3.5236401557922363,
      "rewards/margins": 2.467839479446411,
      "rewards/rejected": -5.991480350494385,
      "step": 390
    },
    {
      "epoch": 0.3675203858964052,
      "grad_norm": 2.1458094120025635,
      "learning_rate": 4.172826515897146e-06,
      "logits/chosen": 1.3029582500457764,
      "logits/rejected": 0.7705980539321899,
      "logps/chosen": -2.192617416381836,
      "logps/rejected": -3.8413078784942627,
      "loss": 0.3557,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -3.288925886154175,
      "rewards/margins": 2.4730350971221924,
      "rewards/rejected": -5.761960506439209,
      "step": 400
    },
    {
      "epoch": 0.3675203858964052,
      "eval_logits/chosen": 0.8742353320121765,
      "eval_logits/rejected": 0.14320053160190582,
      "eval_logps/chosen": -2.0894505977630615,
      "eval_logps/rejected": -4.20783805847168,
      "eval_loss": 0.4082850515842438,
      "eval_rewards/accuracies": 0.8863636255264282,
      "eval_rewards/chosen": -3.1341757774353027,
      "eval_rewards/margins": 3.1775810718536377,
      "eval_rewards/rejected": -6.311756610870361,
      "eval_runtime": 24.4316,
      "eval_samples_per_second": 28.815,
      "eval_steps_per_second": 3.602,
      "step": 400
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9.720315889194107e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}