{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.22970024118525326,
  "eval_steps": 50,
  "global_step": 250,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00918800964741013,
      "grad_norm": 0.036612071096897125,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 15.01579761505127,
      "logits/rejected": 15.359031677246094,
      "logps/chosen": -0.2681262791156769,
      "logps/rejected": -0.31947994232177734,
      "loss": 0.9551,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.40218934416770935,
      "rewards/margins": 0.07703053951263428,
      "rewards/rejected": -0.479219913482666,
      "step": 10
    },
    {
      "epoch": 0.01837601929482026,
      "grad_norm": 0.05575725808739662,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 14.570712089538574,
      "logits/rejected": 15.321355819702148,
      "logps/chosen": -0.2867889404296875,
      "logps/rejected": -0.3514837622642517,
      "loss": 0.923,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.43018341064453125,
      "rewards/margins": 0.09704220294952393,
      "rewards/rejected": -0.5272256135940552,
      "step": 20
    },
    {
      "epoch": 0.02756402894223039,
      "grad_norm": 0.0492466576397419,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 14.748420715332031,
      "logits/rejected": 14.969354629516602,
      "logps/chosen": -0.28405922651290894,
      "logps/rejected": -0.32855403423309326,
      "loss": 0.9357,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.426088809967041,
      "rewards/margins": 0.06674225628376007,
      "rewards/rejected": -0.4928310811519623,
      "step": 30
    },
    {
      "epoch": 0.03675203858964052,
      "grad_norm": 0.05719422921538353,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 14.28278923034668,
      "logits/rejected": 14.76964282989502,
      "logps/chosen": -0.27940627932548523,
      "logps/rejected": -0.3408831059932709,
      "loss": 0.9215,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.41910940408706665,
      "rewards/margins": 0.09221524000167847,
      "rewards/rejected": -0.5113246440887451,
      "step": 40
    },
    {
      "epoch": 0.04594004823705065,
      "grad_norm": 0.06247895210981369,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 14.943578720092773,
      "logits/rejected": 14.936178207397461,
      "logps/chosen": -0.2819541394710541,
      "logps/rejected": -0.3245392441749573,
      "loss": 0.9464,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.4229312539100647,
      "rewards/margins": 0.06387762725353241,
      "rewards/rejected": -0.4868088662624359,
      "step": 50
    },
    {
      "epoch": 0.04594004823705065,
      "eval_logits/chosen": 14.7594575881958,
      "eval_logits/rejected": 15.193694114685059,
      "eval_logps/chosen": -0.2807807922363281,
      "eval_logps/rejected": -0.36209535598754883,
      "eval_loss": 0.9397181868553162,
      "eval_rewards/accuracies": 0.5681818127632141,
      "eval_rewards/chosen": -0.4211711883544922,
      "eval_rewards/margins": 0.12197184562683105,
      "eval_rewards/rejected": -0.5431429743766785,
      "eval_runtime": 24.9762,
      "eval_samples_per_second": 28.187,
      "eval_steps_per_second": 3.523,
      "step": 50
    },
    {
      "epoch": 0.05512805788446078,
      "grad_norm": 0.11519577354192734,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": 14.996228218078613,
      "logits/rejected": 15.37781810760498,
      "logps/chosen": -0.2809831202030182,
      "logps/rejected": -0.35486167669296265,
      "loss": 0.9318,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.4214746952056885,
      "rewards/margins": 0.1108178049325943,
      "rewards/rejected": -0.5322924852371216,
      "step": 60
    },
    {
      "epoch": 0.06431606753187091,
      "grad_norm": 0.06691388040781021,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": 14.612454414367676,
      "logits/rejected": 15.678136825561523,
      "logps/chosen": -0.2569667100906372,
      "logps/rejected": -0.40047627687454224,
      "loss": 0.9158,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.3854501247406006,
      "rewards/margins": 0.21526429057121277,
      "rewards/rejected": -0.600714385509491,
      "step": 70
    },
    {
      "epoch": 0.07350407717928104,
      "grad_norm": 0.05976058170199394,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": 14.873895645141602,
      "logits/rejected": 15.50474739074707,
      "logps/chosen": -0.28742527961730957,
      "logps/rejected": -0.37555089592933655,
      "loss": 0.9372,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.43113788962364197,
      "rewards/margins": 0.13218846917152405,
      "rewards/rejected": -0.5633264183998108,
      "step": 80
    },
    {
      "epoch": 0.08269208682669117,
      "grad_norm": 0.0602131113409996,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": 14.356691360473633,
      "logits/rejected": 14.895658493041992,
      "logps/chosen": -0.2613506317138672,
      "logps/rejected": -0.3317110538482666,
      "loss": 0.9324,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.3920259475708008,
      "rewards/margins": 0.10554064810276031,
      "rewards/rejected": -0.4975665509700775,
      "step": 90
    },
    {
      "epoch": 0.0918800964741013,
      "grad_norm": 0.07126503437757492,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": 14.862826347351074,
      "logits/rejected": 15.257089614868164,
      "logps/chosen": -0.2707213759422302,
      "logps/rejected": -0.3511395752429962,
      "loss": 0.9353,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.4060820937156677,
      "rewards/margins": 0.1206272691488266,
      "rewards/rejected": -0.5267094373703003,
      "step": 100
    },
    {
      "epoch": 0.0918800964741013,
      "eval_logits/chosen": 14.664334297180176,
      "eval_logits/rejected": 15.113536834716797,
      "eval_logps/chosen": -0.2750833034515381,
      "eval_logps/rejected": -0.36540210247039795,
      "eval_loss": 0.9324077367782593,
      "eval_rewards/accuracies": 0.5795454382896423,
      "eval_rewards/chosen": -0.41262495517730713,
      "eval_rewards/margins": 0.1354781985282898,
      "eval_rewards/rejected": -0.5481031537055969,
      "eval_runtime": 24.4286,
      "eval_samples_per_second": 28.819,
      "eval_steps_per_second": 3.602,
      "step": 100
    },
    {
      "epoch": 0.10106810612151143,
      "grad_norm": 0.07136944681406021,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": 14.942098617553711,
      "logits/rejected": 15.138586044311523,
      "logps/chosen": -0.2860812246799469,
      "logps/rejected": -0.36259371042251587,
      "loss": 0.934,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.42912182211875916,
      "rewards/margins": 0.11476878076791763,
      "rewards/rejected": -0.5438905954360962,
      "step": 110
    },
    {
      "epoch": 0.11025611576892155,
      "grad_norm": 0.07038908451795578,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": 14.488851547241211,
      "logits/rejected": 14.702054023742676,
      "logps/chosen": -0.2662215232849121,
      "logps/rejected": -0.3013685941696167,
      "loss": 0.9202,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.39933228492736816,
      "rewards/margins": 0.05272058770060539,
      "rewards/rejected": -0.45205289125442505,
      "step": 120
    },
    {
      "epoch": 0.11944412541633169,
      "grad_norm": 0.06875801086425781,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": 14.075657844543457,
      "logits/rejected": 14.696513175964355,
      "logps/chosen": -0.250360369682312,
      "logps/rejected": -0.3504650592803955,
      "loss": 0.9266,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.375540554523468,
      "rewards/margins": 0.15015706419944763,
      "rewards/rejected": -0.5256975889205933,
      "step": 130
    },
    {
      "epoch": 0.12863213506374183,
      "grad_norm": 0.0984601378440857,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": 13.738212585449219,
      "logits/rejected": 14.311574935913086,
      "logps/chosen": -0.26711025834083557,
      "logps/rejected": -0.3587702810764313,
      "loss": 0.9185,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.40066537261009216,
      "rewards/margins": 0.13749003410339355,
      "rewards/rejected": -0.5381554365158081,
      "step": 140
    },
    {
      "epoch": 0.13782014471115195,
      "grad_norm": 0.10201425850391388,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": 13.7462797164917,
      "logits/rejected": 14.230626106262207,
      "logps/chosen": -0.25559619069099426,
      "logps/rejected": -0.3708702623844147,
      "loss": 0.9106,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.3833943009376526,
      "rewards/margins": 0.17291104793548584,
      "rewards/rejected": -0.5563054084777832,
      "step": 150
    },
    {
      "epoch": 0.13782014471115195,
      "eval_logits/chosen": 13.458538055419922,
      "eval_logits/rejected": 13.998083114624023,
      "eval_logps/chosen": -0.2759075462818146,
      "eval_logps/rejected": -0.3873325288295746,
      "eval_loss": 0.9164085388183594,
      "eval_rewards/accuracies": 0.5795454382896423,
      "eval_rewards/chosen": -0.41386130452156067,
      "eval_rewards/margins": 0.1671374887228012,
      "eval_rewards/rejected": -0.5809988379478455,
      "eval_runtime": 24.4393,
      "eval_samples_per_second": 28.806,
      "eval_steps_per_second": 3.601,
      "step": 150
    },
    {
      "epoch": 0.14700815435856207,
      "grad_norm": 0.11537656933069229,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": 12.686149597167969,
      "logits/rejected": 13.478736877441406,
      "logps/chosen": -0.23941929638385773,
      "logps/rejected": -0.3713286519050598,
      "loss": 0.9094,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.3591288924217224,
      "rewards/margins": 0.1978640854358673,
      "rewards/rejected": -0.5569929480552673,
      "step": 160
    },
    {
      "epoch": 0.1561961640059722,
      "grad_norm": 0.1196313351392746,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": 13.221656799316406,
      "logits/rejected": 13.317082405090332,
      "logps/chosen": -0.3033878207206726,
      "logps/rejected": -0.3784424960613251,
      "loss": 0.9057,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.4550817608833313,
      "rewards/margins": 0.11258199065923691,
      "rewards/rejected": -0.5676637887954712,
      "step": 170
    },
    {
      "epoch": 0.16538417365338234,
      "grad_norm": 0.18745549023151398,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": 11.797627449035645,
      "logits/rejected": 12.031414985656738,
      "logps/chosen": -0.2746419608592987,
      "logps/rejected": -0.3629845976829529,
      "loss": 0.8954,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.41196292638778687,
      "rewards/margins": 0.13251398503780365,
      "rewards/rejected": -0.5444768667221069,
      "step": 180
    },
    {
      "epoch": 0.17457218330079247,
      "grad_norm": 0.1806156188249588,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": 10.275301933288574,
      "logits/rejected": 10.937273025512695,
      "logps/chosen": -0.2880379557609558,
      "logps/rejected": -0.4154580533504486,
      "loss": 0.8875,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.43205690383911133,
      "rewards/margins": 0.19113019108772278,
      "rewards/rejected": -0.6231871247291565,
      "step": 190
    },
    {
      "epoch": 0.1837601929482026,
      "grad_norm": 0.1839464157819748,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": 10.020039558410645,
      "logits/rejected": 10.66059398651123,
      "logps/chosen": -0.3136019706726074,
      "logps/rejected": -0.4385503828525543,
      "loss": 0.8647,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.47040295600891113,
      "rewards/margins": 0.18742261826992035,
      "rewards/rejected": -0.6578255891799927,
      "step": 200
    },
    {
      "epoch": 0.1837601929482026,
      "eval_logits/chosen": 9.442557334899902,
      "eval_logits/rejected": 10.053345680236816,
      "eval_logps/chosen": -0.3080674409866333,
      "eval_logps/rejected": -0.4899139702320099,
      "eval_loss": 0.8702690005302429,
      "eval_rewards/accuracies": 0.6931818127632141,
      "eval_rewards/chosen": -0.46210116147994995,
      "eval_rewards/margins": 0.27276986837387085,
      "eval_rewards/rejected": -0.7348710894584656,
      "eval_runtime": 24.4185,
      "eval_samples_per_second": 28.831,
      "eval_steps_per_second": 3.604,
      "step": 200
    },
    {
      "epoch": 0.19294820259561274,
      "grad_norm": 0.269613116979599,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": 7.941342353820801,
      "logits/rejected": 8.542920112609863,
      "logps/chosen": -0.3083941638469696,
      "logps/rejected": -0.5024437308311462,
      "loss": 0.8471,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.4625912606716156,
      "rewards/margins": 0.29107433557510376,
      "rewards/rejected": -0.7536656856536865,
      "step": 210
    },
    {
      "epoch": 0.20213621224302286,
      "grad_norm": 0.2640094459056854,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": 7.587499141693115,
      "logits/rejected": 7.592519283294678,
      "logps/chosen": -0.3381899893283844,
      "logps/rejected": -0.48494213819503784,
      "loss": 0.8427,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.5072849988937378,
      "rewards/margins": 0.22012826800346375,
      "rewards/rejected": -0.7274132966995239,
      "step": 220
    },
    {
      "epoch": 0.21132422189043298,
      "grad_norm": 0.29708293080329895,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": 6.250656604766846,
      "logits/rejected": 6.7652716636657715,
      "logps/chosen": -0.3644888997077942,
      "logps/rejected": -0.5470594167709351,
      "loss": 0.8201,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.5467333793640137,
      "rewards/margins": 0.2738557755947113,
      "rewards/rejected": -0.8205891847610474,
      "step": 230
    },
    {
      "epoch": 0.2205122315378431,
      "grad_norm": 0.35299497842788696,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": 4.6331706047058105,
      "logits/rejected": 4.710076332092285,
      "logps/chosen": -0.3634452223777771,
      "logps/rejected": -0.7193974256515503,
      "loss": 0.7877,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.545167863368988,
      "rewards/margins": 0.5339283347129822,
      "rewards/rejected": -1.0790963172912598,
      "step": 240
    },
    {
      "epoch": 0.22970024118525326,
      "grad_norm": 0.4265730082988739,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": 4.992984771728516,
      "logits/rejected": 4.606354713439941,
      "logps/chosen": -0.413116455078125,
      "logps/rejected": -0.7104976177215576,
      "loss": 0.7902,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.6196746826171875,
      "rewards/margins": 0.4460717737674713,
      "rewards/rejected": -1.0657463073730469,
      "step": 250
    },
    {
      "epoch": 0.22970024118525326,
      "eval_logits/chosen": 4.127804279327393,
      "eval_logits/rejected": 3.742251396179199,
      "eval_logps/chosen": -0.420327365398407,
      "eval_logps/rejected": -0.7902651429176331,
      "eval_loss": 0.7682384252548218,
      "eval_rewards/accuracies": 0.7159090638160706,
      "eval_rewards/chosen": -0.6304910182952881,
      "eval_rewards/margins": 0.5549066662788391,
      "eval_rewards/rejected": -1.185397744178772,
      "eval_runtime": 24.4318,
      "eval_samples_per_second": 28.815,
      "eval_steps_per_second": 3.602,
      "step": 250
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.082014861863158e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}