{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.6523511823865181,
  "eval_steps": 50,
  "global_step": 900,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.007248346470961312,
      "grad_norm": 0.07235438376665115,
      "learning_rate": 4.998766400914329e-06,
      "logits/chosen": -1.9785633087158203,
      "logits/rejected": -2.5380234718322754,
      "logps/chosen": -0.28137442469596863,
      "logps/rejected": -0.37793582677841187,
      "loss": 7.3908,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.42206162214279175,
      "rewards/margins": 0.14484205842018127,
      "rewards/rejected": -0.5669037103652954,
      "step": 10
    },
    {
      "epoch": 0.014496692941922623,
      "grad_norm": 0.07455909997224808,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": -2.0576319694519043,
      "logits/rejected": -2.499069929122925,
      "logps/chosen": -0.2769497036933899,
      "logps/rejected": -0.33560773730278015,
      "loss": 7.3753,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.41542449593544006,
      "rewards/margins": 0.08798708021640778,
      "rewards/rejected": -0.503411591053009,
      "step": 20
    },
    {
      "epoch": 0.021745039412883936,
      "grad_norm": 0.09561269730329514,
      "learning_rate": 4.9889049115077e-06,
      "logits/chosen": -2.1018643379211426,
      "logits/rejected": -2.377673625946045,
      "logps/chosen": -0.2673099935054779,
      "logps/rejected": -0.3057115972042084,
      "loss": 7.4516,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.40096497535705566,
      "rewards/margins": 0.05760239437222481,
      "rewards/rejected": -0.45856744050979614,
      "step": 30
    },
    {
      "epoch": 0.028993385883845247,
      "grad_norm": 0.08169445395469666,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": -2.1627352237701416,
      "logits/rejected": -2.3873391151428223,
      "logps/chosen": -0.27636194229125977,
      "logps/rejected": -0.3703404664993286,
      "loss": 7.3888,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.4145428538322449,
      "rewards/margins": 0.14096775650978088,
      "rewards/rejected": -0.5555106401443481,
      "step": 40
    },
    {
      "epoch": 0.03624173235480656,
      "grad_norm": 0.08877147734165192,
      "learning_rate": 4.9692208514878445e-06,
      "logits/chosen": -2.1400368213653564,
      "logits/rejected": -2.44627046585083,
      "logps/chosen": -0.25384199619293213,
      "logps/rejected": -0.309225857257843,
      "loss": 7.518,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.3807629942893982,
      "rewards/margins": 0.08307582885026932,
      "rewards/rejected": -0.4638388752937317,
      "step": 50
    },
    {
      "epoch": 0.03624173235480656,
      "eval_logits/chosen": -2.1149837970733643,
      "eval_logits/rejected": -2.4759538173675537,
      "eval_logps/chosen": -0.28288090229034424,
      "eval_logps/rejected": -0.3434317111968994,
      "eval_loss": 0.9120966196060181,
      "eval_rewards/accuracies": 0.5089285969734192,
      "eval_rewards/chosen": -0.42432135343551636,
      "eval_rewards/margins": 0.09082622081041336,
      "eval_rewards/rejected": -0.5151475667953491,
      "eval_runtime": 30.2473,
      "eval_samples_per_second": 29.49,
      "eval_steps_per_second": 3.703,
      "step": 50
    },
    {
      "epoch": 0.04349007882576787,
      "grad_norm": 0.11774340271949768,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": -2.0567967891693115,
      "logits/rejected": -2.545668125152588,
      "logps/chosen": -0.2460743933916092,
      "logps/rejected": -0.32390663027763367,
      "loss": 7.3929,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.3691115975379944,
      "rewards/margins": 0.1167483925819397,
      "rewards/rejected": -0.4858599603176117,
      "step": 60
    },
    {
      "epoch": 0.05073842529672919,
      "grad_norm": 0.10012238472700119,
      "learning_rate": 4.939791904846869e-06,
      "logits/chosen": -1.9286606311798096,
      "logits/rejected": -2.4620895385742188,
      "logps/chosen": -0.25992274284362793,
      "logps/rejected": -0.3665553629398346,
      "loss": 7.1863,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.3898841440677643,
      "rewards/margins": 0.15994893014431,
      "rewards/rejected": -0.5498330593109131,
      "step": 70
    },
    {
      "epoch": 0.057986771767690494,
      "grad_norm": 0.07387609034776688,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": -2.0778262615203857,
      "logits/rejected": -2.4614813327789307,
      "logps/chosen": -0.24521782994270325,
      "logps/rejected": -0.3450419306755066,
      "loss": 7.3662,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.3678267002105713,
      "rewards/margins": 0.1497361809015274,
      "rewards/rejected": -0.5175628662109375,
      "step": 80
    },
    {
      "epoch": 0.0652351182386518,
      "grad_norm": 0.09718403220176697,
      "learning_rate": 4.900734214192358e-06,
      "logits/chosen": -1.998186707496643,
      "logits/rejected": -2.4382071495056152,
      "logps/chosen": -0.24080593883991241,
      "logps/rejected": -0.3280099332332611,
      "loss": 7.268,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.36120888590812683,
      "rewards/margins": 0.13080602884292603,
      "rewards/rejected": -0.49201488494873047,
      "step": 90
    },
    {
      "epoch": 0.07248346470961312,
      "grad_norm": 0.08059138059616089,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": -1.894728422164917,
      "logits/rejected": -2.475407838821411,
      "logps/chosen": -0.2019994705915451,
      "logps/rejected": -0.29733040928840637,
      "loss": 7.2544,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.30299919843673706,
      "rewards/margins": 0.14299637079238892,
      "rewards/rejected": -0.445995569229126,
      "step": 100
    },
    {
      "epoch": 0.07248346470961312,
      "eval_logits/chosen": -2.1233773231506348,
      "eval_logits/rejected": -2.4810428619384766,
      "eval_logps/chosen": -0.24400465190410614,
      "eval_logps/rejected": -0.3260345160961151,
      "eval_loss": 0.8920583724975586,
      "eval_rewards/accuracies": 0.5625,
      "eval_rewards/chosen": -0.3660070598125458,
      "eval_rewards/margins": 0.12304472178220749,
      "eval_rewards/rejected": -0.4890517592430115,
      "eval_runtime": 30.28,
      "eval_samples_per_second": 29.458,
      "eval_steps_per_second": 3.699,
      "step": 100
    },
    {
      "epoch": 0.07973181118057443,
      "grad_norm": 0.060006480664014816,
      "learning_rate": 4.852201922385564e-06,
      "logits/chosen": -2.1128883361816406,
      "logits/rejected": -2.5161118507385254,
      "logps/chosen": -0.23731942474842072,
      "logps/rejected": -0.31481316685676575,
      "loss": 7.1762,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.3559790849685669,
      "rewards/margins": 0.11624068021774292,
      "rewards/rejected": -0.4722197651863098,
      "step": 110
    },
    {
      "epoch": 0.08698015765153574,
      "grad_norm": 0.0602690726518631,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": -2.0212602615356445,
      "logits/rejected": -2.4894890785217285,
      "logps/chosen": -0.18956038355827332,
      "logps/rejected": -0.31935763359069824,
      "loss": 7.1442,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.2843405604362488,
      "rewards/margins": 0.19469590485095978,
      "rewards/rejected": -0.47903648018836975,
      "step": 120
    },
    {
      "epoch": 0.09422850412249706,
      "grad_norm": 0.06425223499536514,
      "learning_rate": 4.794386564209953e-06,
      "logits/chosen": -2.06717848777771,
      "logits/rejected": -2.5086405277252197,
      "logps/chosen": -0.2075866460800171,
      "logps/rejected": -0.3124083876609802,
      "loss": 7.1449,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.31137990951538086,
      "rewards/margins": 0.15723267197608948,
      "rewards/rejected": -0.46861258149147034,
      "step": 130
    },
    {
      "epoch": 0.10147685059345837,
      "grad_norm": 0.08793757855892181,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": -2.0242953300476074,
      "logits/rejected": -2.4943737983703613,
      "logps/chosen": -0.19989363849163055,
      "logps/rejected": -0.32057636976242065,
      "loss": 7.122,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.29984045028686523,
      "rewards/margins": 0.18102414906024933,
      "rewards/rejected": -0.48086461424827576,
      "step": 140
    },
    {
      "epoch": 0.10872519706441967,
      "grad_norm": 0.09698841720819473,
      "learning_rate": 4.72751631047092e-06,
      "logits/chosen": -2.098658800125122,
      "logits/rejected": -2.582111120223999,
      "logps/chosen": -0.18897351622581482,
      "logps/rejected": -0.363709419965744,
      "loss": 7.1253,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.28346025943756104,
      "rewards/margins": 0.2621038556098938,
      "rewards/rejected": -0.5455641150474548,
      "step": 150
    },
    {
      "epoch": 0.10872519706441967,
      "eval_logits/chosen": -2.1921768188476562,
      "eval_logits/rejected": -2.556370973587036,
      "eval_logps/chosen": -0.22214026749134064,
      "eval_logps/rejected": -0.3240993022918701,
      "eval_loss": 0.8764163851737976,
      "eval_rewards/accuracies": 0.5803571343421936,
      "eval_rewards/chosen": -0.33321040868759155,
      "eval_rewards/margins": 0.15293852984905243,
      "eval_rewards/rejected": -0.48614898324012756,
      "eval_runtime": 30.273,
      "eval_samples_per_second": 29.465,
      "eval_steps_per_second": 3.7,
      "step": 150
    },
    {
      "epoch": 0.11597354353538099,
      "grad_norm": 0.09119638800621033,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": -2.2321066856384277,
      "logits/rejected": -2.5475690364837646,
      "logps/chosen": -0.19602122902870178,
      "logps/rejected": -0.27682799100875854,
      "loss": 7.0307,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.2940318286418915,
      "rewards/margins": 0.12121014297008514,
      "rewards/rejected": -0.4152420163154602,
      "step": 160
    },
    {
      "epoch": 0.1232218900063423,
      "grad_norm": 0.11496366560459137,
      "learning_rate": 4.65185506750986e-06,
      "logits/chosen": -2.1506271362304688,
      "logits/rejected": -2.506520986557007,
      "logps/chosen": -0.18018664419651031,
      "logps/rejected": -0.3084454834461212,
      "loss": 7.1652,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.27028003334999084,
      "rewards/margins": 0.19238826632499695,
      "rewards/rejected": -0.462668240070343,
      "step": 170
    },
    {
      "epoch": 0.1304702364773036,
      "grad_norm": 0.09201692789793015,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": -2.285245895385742,
      "logits/rejected": -2.573941707611084,
      "logps/chosen": -0.1844937801361084,
      "logps/rejected": -0.3112415075302124,
      "loss": 7.0022,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.2767406404018402,
      "rewards/margins": 0.1901216208934784,
      "rewards/rejected": -0.4668622612953186,
      "step": 180
    },
    {
      "epoch": 0.13771858294826492,
      "grad_norm": 0.13191230595111847,
      "learning_rate": 4.567701435686405e-06,
      "logits/chosen": -2.265538454055786,
      "logits/rejected": -2.569638252258301,
      "logps/chosen": -0.21122264862060547,
      "logps/rejected": -0.3469196856021881,
      "loss": 7.004,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.3168340027332306,
      "rewards/margins": 0.20354552567005157,
      "rewards/rejected": -0.5203795433044434,
      "step": 190
    },
    {
      "epoch": 0.14496692941922623,
      "grad_norm": 0.1503789722919464,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": -2.2021026611328125,
      "logits/rejected": -2.655961513519287,
      "logps/chosen": -0.19664816558361053,
      "logps/rejected": -0.34679359197616577,
      "loss": 7.0539,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.2949722409248352,
      "rewards/margins": 0.22521813213825226,
      "rewards/rejected": -0.5201903581619263,
      "step": 200
    },
    {
      "epoch": 0.14496692941922623,
      "eval_logits/chosen": -2.325611114501953,
      "eval_logits/rejected": -2.715676784515381,
      "eval_logps/chosen": -0.2261900007724762,
      "eval_logps/rejected": -0.3570065200328827,
      "eval_loss": 0.8565592169761658,
      "eval_rewards/accuracies": 0.5714285969734192,
      "eval_rewards/chosen": -0.3392849862575531,
      "eval_rewards/margins": 0.19622473418712616,
      "eval_rewards/rejected": -0.5355097055435181,
      "eval_runtime": 30.2681,
      "eval_samples_per_second": 29.47,
      "eval_steps_per_second": 3.7,
      "step": 200
    },
    {
      "epoch": 0.15221527589018755,
      "grad_norm": 0.12589682638645172,
      "learning_rate": 4.475387530939226e-06,
      "logits/chosen": -2.24491810798645,
      "logits/rejected": -2.739323616027832,
      "logps/chosen": -0.19889305531978607,
      "logps/rejected": -0.32832199335098267,
      "loss": 6.9013,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.2983395457267761,
      "rewards/margins": 0.1941433995962143,
      "rewards/rejected": -0.492482990026474,
      "step": 210
    },
    {
      "epoch": 0.15946362236114886,
      "grad_norm": 0.1417039930820465,
      "learning_rate": 4.426283106939474e-06,
      "logits/chosen": -2.278883695602417,
      "logits/rejected": -2.7108216285705566,
      "logps/chosen": -0.2170490026473999,
      "logps/rejected": -0.3783469796180725,
      "loss": 6.9366,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.32557350397109985,
      "rewards/margins": 0.2419470101594925,
      "rewards/rejected": -0.5675204992294312,
      "step": 220
    },
    {
      "epoch": 0.16671196883211017,
      "grad_norm": 0.1472843438386917,
      "learning_rate": 4.3752776740761495e-06,
      "logits/chosen": -2.16560697555542,
      "logits/rejected": -2.7911431789398193,
      "logps/chosen": -0.20280452072620392,
      "logps/rejected": -0.3874067962169647,
      "loss": 6.8823,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.3042067885398865,
      "rewards/margins": 0.2769034504890442,
      "rewards/rejected": -0.5811101794242859,
      "step": 230
    },
    {
      "epoch": 0.1739603153030715,
      "grad_norm": 0.15212437510490417,
      "learning_rate": 4.322421568553529e-06,
      "logits/chosen": -2.2929282188415527,
      "logits/rejected": -2.8649649620056152,
      "logps/chosen": -0.1841541975736618,
      "logps/rejected": -0.3838128447532654,
      "loss": 6.7139,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.2762312889099121,
      "rewards/margins": 0.29948797821998596,
      "rewards/rejected": -0.5757192373275757,
      "step": 240
    },
    {
      "epoch": 0.1812086617740328,
      "grad_norm": 0.22261729836463928,
      "learning_rate": 4.267766952966369e-06,
      "logits/chosen": -2.436549663543701,
      "logits/rejected": -2.827937126159668,
      "logps/chosen": -0.2322833091020584,
      "logps/rejected": -0.39587104320526123,
      "loss": 6.7737,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.3484249711036682,
      "rewards/margins": 0.24538159370422363,
      "rewards/rejected": -0.5938066244125366,
      "step": 250
    },
    {
      "epoch": 0.1812086617740328,
      "eval_logits/chosen": -2.4983742237091064,
      "eval_logits/rejected": -2.8988120555877686,
      "eval_logps/chosen": -0.269414484500885,
      "eval_logps/rejected": -0.45964303612709045,
      "eval_loss": 0.816527247428894,
      "eval_rewards/accuracies": 0.5714285969734192,
      "eval_rewards/chosen": -0.4041217863559723,
      "eval_rewards/margins": 0.2853427231311798,
      "eval_rewards/rejected": -0.6894644498825073,
      "eval_runtime": 30.2705,
      "eval_samples_per_second": 29.468,
      "eval_steps_per_second": 3.7,
      "step": 250
    },
    {
      "epoch": 0.18845700824499412,
      "grad_norm": 0.25962531566619873,
      "learning_rate": 4.211367764821722e-06,
      "logits/chosen": -2.3911795616149902,
      "logits/rejected": -2.9030754566192627,
      "logps/chosen": -0.23631009459495544,
      "logps/rejected": -0.44218096137046814,
      "loss": 6.6574,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.35446515679359436,
      "rewards/margins": 0.3088063597679138,
      "rewards/rejected": -0.6632715463638306,
      "step": 260
    },
    {
      "epoch": 0.19570535471595543,
      "grad_norm": 0.29614314436912537,
      "learning_rate": 4.15327966330913e-06,
      "logits/chosen": -2.39563250541687,
      "logits/rejected": -2.8926572799682617,
      "logps/chosen": -0.2558010220527649,
      "logps/rejected": -0.5495272278785706,
      "loss": 6.6017,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.38370150327682495,
      "rewards/margins": 0.4405893385410309,
      "rewards/rejected": -0.8242908716201782,
      "step": 270
    },
    {
      "epoch": 0.20295370118691675,
      "grad_norm": 0.2586583197116852,
      "learning_rate": 4.093559974371725e-06,
      "logits/chosen": -2.5368762016296387,
      "logits/rejected": -2.8856003284454346,
      "logps/chosen": -0.2823846638202667,
      "logps/rejected": -0.4850040376186371,
      "loss": 6.3849,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.4235769808292389,
      "rewards/margins": 0.30392909049987793,
      "rewards/rejected": -0.7275060415267944,
      "step": 280
    },
    {
      "epoch": 0.21020204765787803,
      "grad_norm": 0.3820374608039856,
      "learning_rate": 4.032267634132442e-06,
      "logits/chosen": -2.404680013656616,
      "logits/rejected": -2.8940536975860596,
      "logps/chosen": -0.3108128011226654,
      "logps/rejected": -0.7274529337882996,
      "loss": 6.0103,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.46621912717819214,
      "rewards/margins": 0.6249603033065796,
      "rewards/rejected": -1.0911794900894165,
      "step": 290
    },
    {
      "epoch": 0.21745039412883935,
      "grad_norm": 0.572814404964447,
      "learning_rate": 3.969463130731183e-06,
      "logits/chosen": -2.3861801624298096,
      "logits/rejected": -2.8026037216186523,
      "logps/chosen": -0.40264564752578735,
      "logps/rejected": -0.8128012418746948,
      "loss": 5.9678,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.6039685010910034,
      "rewards/margins": 0.6152334809303284,
      "rewards/rejected": -1.219201922416687,
      "step": 300
    },
    {
      "epoch": 0.21745039412883935,
      "eval_logits/chosen": -2.519517421722412,
      "eval_logits/rejected": -2.8339390754699707,
      "eval_logps/chosen": -0.4459304213523865,
      "eval_logps/rejected": -0.8040717840194702,
      "eval_loss": 0.7266466617584229,
      "eval_rewards/accuracies": 0.5625,
      "eval_rewards/chosen": -0.6688956618309021,
      "eval_rewards/margins": 0.5372119545936584,
      "eval_rewards/rejected": -1.206107497215271,
      "eval_runtime": 30.2692,
      "eval_samples_per_second": 29.469,
      "eval_steps_per_second": 3.7,
      "step": 300
    },
    {
      "epoch": 0.22469874059980066,
      "grad_norm": 0.3507815897464752,
      "learning_rate": 3.905208444630326e-06,
      "logits/chosen": -2.4855399131774902,
      "logits/rejected": -2.8341915607452393,
      "logps/chosen": -0.36750203371047974,
      "logps/rejected": -0.8827482461929321,
      "loss": 5.5511,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.5512530207633972,
      "rewards/margins": 0.7728692889213562,
      "rewards/rejected": -1.324122428894043,
      "step": 310
    },
    {
      "epoch": 0.23194708707076198,
      "grad_norm": 0.34762975573539734,
      "learning_rate": 3.839566987447492e-06,
      "logits/chosen": -2.312408924102783,
      "logits/rejected": -2.7844693660736084,
      "logps/chosen": -0.465009868144989,
      "logps/rejected": -1.074065923690796,
      "loss": 5.5495,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.6975148320198059,
      "rewards/margins": 0.9135842323303223,
      "rewards/rejected": -1.6110990047454834,
      "step": 320
    },
    {
      "epoch": 0.2391954335417233,
      "grad_norm": 0.5242094397544861,
      "learning_rate": 3.772603539375929e-06,
      "logits/chosen": -2.4517929553985596,
      "logits/rejected": -2.7228286266326904,
      "logps/chosen": -0.5588141679763794,
      "logps/rejected": -1.1725430488586426,
      "loss": 5.526,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.8382211923599243,
      "rewards/margins": 0.9205933809280396,
      "rewards/rejected": -1.7588145732879639,
      "step": 330
    },
    {
      "epoch": 0.2464437800126846,
      "grad_norm": 0.7856224179267883,
      "learning_rate": 3.7043841852542884e-06,
      "logits/chosen": -2.5230085849761963,
      "logits/rejected": -2.8241894245147705,
      "logps/chosen": -0.527363657951355,
      "logps/rejected": -1.1461597681045532,
      "loss": 5.0865,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.7910455465316772,
      "rewards/margins": 0.9281940460205078,
      "rewards/rejected": -1.7192394733428955,
      "step": 340
    },
    {
      "epoch": 0.2536921264836459,
      "grad_norm": 0.5080834031105042,
      "learning_rate": 3.634976249348867e-06,
      "logits/chosen": -2.414911985397339,
      "logits/rejected": -2.8537158966064453,
      "logps/chosen": -0.5747151374816895,
      "logps/rejected": -1.4558517932891846,
      "loss": 5.0036,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.862072765827179,
      "rewards/margins": 1.3217047452926636,
      "rewards/rejected": -2.1837775707244873,
      "step": 350
    },
    {
      "epoch": 0.2536921264836459,
      "eval_logits/chosen": -2.647620677947998,
      "eval_logits/rejected": -2.910278558731079,
      "eval_logps/chosen": -0.6226872205734253,
      "eval_logps/rejected": -1.3186976909637451,
      "eval_loss": 0.6240565776824951,
      "eval_rewards/accuracies": 0.5982142686843872,
      "eval_rewards/chosen": -0.9340308308601379,
      "eval_rewards/margins": 1.0440157651901245,
      "eval_rewards/rejected": -1.9780464172363281,
      "eval_runtime": 30.2634,
      "eval_samples_per_second": 29.475,
      "eval_steps_per_second": 3.701,
      "step": 350
    },
    {
      "epoch": 0.2609404729546072,
      "grad_norm": 0.5225710868835449,
      "learning_rate": 3.564448228912682e-06,
      "logits/chosen": -2.5971312522888184,
      "logits/rejected": -2.8506340980529785,
      "logps/chosen": -0.6313252449035645,
      "logps/rejected": -1.4177578687667847,
      "loss": 5.2602,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.9469879269599915,
      "rewards/margins": 1.179648995399475,
      "rewards/rejected": -2.1266369819641113,
      "step": 360
    },
    {
      "epoch": 0.26818881942556855,
      "grad_norm": 0.44064539670944214,
      "learning_rate": 3.4928697265869516e-06,
      "logits/chosen": -2.645890712738037,
      "logits/rejected": -2.8477931022644043,
      "logps/chosen": -0.6448010206222534,
      "logps/rejected": -1.6615798473358154,
      "loss": 4.9912,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.9672015905380249,
      "rewards/margins": 1.5251682996749878,
      "rewards/rejected": -2.492370128631592,
      "step": 370
    },
    {
      "epoch": 0.27543716589652983,
      "grad_norm": 0.6242617964744568,
      "learning_rate": 3.4203113817116955e-06,
      "logits/chosen": -2.685136318206787,
      "logits/rejected": -2.8105804920196533,
      "logps/chosen": -0.6792098879814148,
      "logps/rejected": -1.753614068031311,
      "loss": 4.7972,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -1.0188149213790894,
      "rewards/margins": 1.611606240272522,
      "rewards/rejected": -2.6304211616516113,
      "step": 380
    },
    {
      "epoch": 0.2826855123674912,
      "grad_norm": 0.5383561253547668,
      "learning_rate": 3.346844800613229e-06,
      "logits/chosen": -2.6124706268310547,
      "logits/rejected": -2.8373632431030273,
      "logps/chosen": -0.6230086088180542,
      "logps/rejected": -2.398132801055908,
      "loss": 4.7275,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.9345127940177917,
      "rewards/margins": 2.662686824798584,
      "rewards/rejected": -3.5971992015838623,
      "step": 390
    },
    {
      "epoch": 0.28993385883845246,
      "grad_norm": 0.6497470736503601,
      "learning_rate": 3.272542485937369e-06,
      "logits/chosen": -2.5315146446228027,
      "logits/rejected": -2.892939805984497,
      "logps/chosen": -0.7851268649101257,
      "logps/rejected": -2.1644442081451416,
      "loss": 4.5874,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.1776902675628662,
      "rewards/margins": 2.0689759254455566,
      "rewards/rejected": -3.246666431427002,
      "step": 400
    },
    {
      "epoch": 0.28993385883845246,
      "eval_logits/chosen": -2.704826831817627,
      "eval_logits/rejected": -2.969177007675171,
      "eval_logps/chosen": -0.7823955416679382,
      "eval_logps/rejected": -2.0450375080108643,
      "eval_loss": 0.5390450358390808,
      "eval_rewards/accuracies": 0.5982142686843872,
      "eval_rewards/chosen": -1.1735934019088745,
      "eval_rewards/margins": 1.8939628601074219,
      "eval_rewards/rejected": -3.067556142807007,
      "eval_runtime": 30.267,
      "eval_samples_per_second": 29.471,
      "eval_steps_per_second": 3.7,
      "step": 400
    },
    {
      "epoch": 0.2971822053094138,
      "grad_norm": 0.5891414880752563,
      "learning_rate": 3.1974777650980737e-06,
      "logits/chosen": -2.6691222190856934,
      "logits/rejected": -2.9309957027435303,
      "logps/chosen": -0.7303667068481445,
      "logps/rejected": -2.1775012016296387,
      "loss": 4.3201,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.0955501794815063,
      "rewards/margins": 2.170701503753662,
      "rewards/rejected": -3.2662513256073,
      "step": 410
    },
    {
      "epoch": 0.3044305517803751,
      "grad_norm": 1.0978150367736816,
      "learning_rate": 3.121724717912138e-06,
      "logits/chosen": -2.57599139213562,
      "logits/rejected": -2.839123249053955,
      "logps/chosen": -0.7009271383285522,
      "logps/rejected": -2.5759215354919434,
      "loss": 4.1763,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -1.0513907670974731,
      "rewards/margins": 2.8124916553497314,
      "rewards/rejected": -3.863882541656494,
      "step": 420
    },
    {
      "epoch": 0.31167889825133643,
      "grad_norm": 0.5167409777641296,
      "learning_rate": 3.045358103491357e-06,
      "logits/chosen": -2.6339075565338135,
      "logits/rejected": -2.956376314163208,
      "logps/chosen": -0.84259432554245,
      "logps/rejected": -2.8900070190429688,
      "loss": 4.1528,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.2638914585113525,
      "rewards/margins": 3.0711190700531006,
      "rewards/rejected": -4.335010528564453,
      "step": 430
    },
    {
      "epoch": 0.3189272447222977,
      "grad_norm": 0.5213866829872131,
      "learning_rate": 2.9684532864643123e-06,
      "logits/chosen": -2.469393014907837,
      "logits/rejected": -2.858123302459717,
      "logps/chosen": -0.9314866065979004,
      "logps/rejected": -3.1660075187683105,
      "loss": 3.7696,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.3972299098968506,
      "rewards/margins": 3.3517813682556152,
      "rewards/rejected": -4.749011516571045,
      "step": 440
    },
    {
      "epoch": 0.32617559119325906,
      "grad_norm": 0.47010573744773865,
      "learning_rate": 2.8910861626005774e-06,
      "logits/chosen": -2.497013568878174,
      "logits/rejected": -2.8785929679870605,
      "logps/chosen": -0.7696805596351624,
      "logps/rejected": -3.087029457092285,
      "loss": 4.3625,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -1.1545207500457764,
      "rewards/margins": 3.4760234355926514,
      "rewards/rejected": -4.630544185638428,
      "step": 450
    },
    {
      "epoch": 0.32617559119325906,
      "eval_logits/chosen": -2.6146204471588135,
      "eval_logits/rejected": -2.9200844764709473,
      "eval_logps/chosen": -0.8188837766647339,
      "eval_logps/rejected": -2.42936110496521,
      "eval_loss": 0.49255281686782837,
      "eval_rewards/accuracies": 0.6339285969734192,
      "eval_rewards/chosen": -1.2283254861831665,
      "eval_rewards/margins": 2.4157161712646484,
      "eval_rewards/rejected": -3.6440417766571045,
      "eval_runtime": 30.2604,
      "eval_samples_per_second": 29.478,
      "eval_steps_per_second": 3.701,
      "step": 450
    },
    {
      "epoch": 0.33342393766422035,
      "grad_norm": 0.8721221685409546,
      "learning_rate": 2.813333083910761e-06,
      "logits/chosen": -2.593501567840576,
      "logits/rejected": -2.9277596473693848,
      "logps/chosen": -0.8970834016799927,
      "logps/rejected": -3.1408064365386963,
      "loss": 3.9344,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.3456249237060547,
      "rewards/margins": 3.3655846118927,
      "rewards/rejected": -4.711209774017334,
      "step": 460
    },
    {
      "epoch": 0.34067228413518164,
      "grad_norm": 0.9014137983322144,
      "learning_rate": 2.7352707832962865e-06,
      "logits/chosen": -2.5280098915100098,
      "logits/rejected": -2.828508138656616,
      "logps/chosen": -0.9087240099906921,
      "logps/rejected": -2.842564105987549,
      "loss": 3.8492,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.3630859851837158,
      "rewards/margins": 2.9007601737976074,
      "rewards/rejected": -4.263846397399902,
      "step": 470
    },
    {
      "epoch": 0.347920630606143,
      "grad_norm": 1.2404453754425049,
      "learning_rate": 2.6569762988232838e-06,
      "logits/chosen": -2.408717393875122,
      "logits/rejected": -2.7902934551239014,
      "logps/chosen": -1.1201987266540527,
      "logps/rejected": -3.879173994064331,
      "loss": 3.5822,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -1.6802982091903687,
      "rewards/margins": 4.138463020324707,
      "rewards/rejected": -5.818760871887207,
      "step": 480
    },
    {
      "epoch": 0.35516897707710426,
      "grad_norm": 0.49635380506515503,
      "learning_rate": 2.578526897695321e-06,
      "logits/chosen": -2.422421455383301,
      "logits/rejected": -2.8097214698791504,
      "logps/chosen": -1.0756808519363403,
      "logps/rejected": -3.880105495452881,
      "loss": 3.8437,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.6135212182998657,
      "rewards/margins": 4.206637382507324,
      "rewards/rejected": -5.8201584815979,
      "step": 490
    },
    {
      "epoch": 0.3624173235480656,
      "grad_norm": 0.5894991755485535,
      "learning_rate": 2.5e-06,
      "logits/chosen": -2.443977117538452,
      "logits/rejected": -2.734081983566284,
      "logps/chosen": -1.1097285747528076,
      "logps/rejected": -3.500363826751709,
      "loss": 3.8597,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -1.664592981338501,
      "rewards/margins": 3.5859527587890625,
      "rewards/rejected": -5.250545501708984,
      "step": 500
    },
    {
      "epoch": 0.3624173235480656,
      "eval_logits/chosen": -2.4630043506622314,
      "eval_logits/rejected": -2.7617809772491455,
      "eval_logps/chosen": -1.2134082317352295,
      "eval_logps/rejected": -3.330636739730835,
      "eval_loss": 0.45747870206832886,
      "eval_rewards/accuracies": 0.6517857313156128,
      "eval_rewards/chosen": -1.8201123476028442,
      "eval_rewards/margins": 3.1758430004119873,
      "eval_rewards/rejected": -4.995955467224121,
      "eval_runtime": 30.254,
      "eval_samples_per_second": 29.484,
      "eval_steps_per_second": 3.702,
      "step": 500
    },
    {
      "epoch": 0.3696656700190269,
      "grad_norm": 0.844010591506958,
      "learning_rate": 2.4214731023046795e-06,
      "logits/chosen": -2.3450961112976074,
      "logits/rejected": -2.7201991081237793,
      "logps/chosen": -1.1768486499786377,
      "logps/rejected": -4.009206295013428,
      "loss": 3.6558,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.765273094177246,
      "rewards/margins": 4.248536586761475,
      "rewards/rejected": -6.0138092041015625,
      "step": 510
    },
    {
      "epoch": 0.37691401648998824,
      "grad_norm": 0.6226333379745483,
      "learning_rate": 2.3430237011767166e-06,
      "logits/chosen": -2.3935933113098145,
      "logits/rejected": -2.7934298515319824,
      "logps/chosen": -1.06581711769104,
      "logps/rejected": -3.6901473999023438,
      "loss": 3.8086,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.59872567653656,
      "rewards/margins": 3.936495304107666,
      "rewards/rejected": -5.535221099853516,
      "step": 520
    },
    {
      "epoch": 0.3841623629609495,
      "grad_norm": 0.8458526134490967,
      "learning_rate": 2.2647292167037143e-06,
      "logits/chosen": -2.4400205612182617,
      "logits/rejected": -2.767524003982544,
      "logps/chosen": -1.2901079654693604,
      "logps/rejected": -4.540966033935547,
      "loss": 3.3647,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -1.935161828994751,
      "rewards/margins": 4.87628698348999,
      "rewards/rejected": -6.811448097229004,
      "step": 530
    },
    {
      "epoch": 0.39141070943191086,
      "grad_norm": 1.2383215427398682,
      "learning_rate": 2.186666916089239e-06,
      "logits/chosen": -2.4235644340515137,
      "logits/rejected": -2.823796272277832,
      "logps/chosen": -1.3300189971923828,
      "logps/rejected": -4.055625915527344,
      "loss": 3.6972,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -1.9950287342071533,
      "rewards/margins": 4.088409900665283,
      "rewards/rejected": -6.083438873291016,
      "step": 540
    },
    {
      "epoch": 0.39865905590287215,
      "grad_norm": 1.0018137693405151,
      "learning_rate": 2.1089138373994226e-06,
      "logits/chosen": -2.498884677886963,
      "logits/rejected": -2.8074941635131836,
      "logps/chosen": -1.4506316184997559,
      "logps/rejected": -3.9293131828308105,
      "loss": 3.6202,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -2.175947427749634,
      "rewards/margins": 3.718022108078003,
      "rewards/rejected": -5.893969535827637,
      "step": 550
    },
    {
      "epoch": 0.39865905590287215,
      "eval_logits/chosen": -2.491316556930542,
      "eval_logits/rejected": -2.792928457260132,
      "eval_logps/chosen": -1.7172259092330933,
      "eval_logps/rejected": -4.1004533767700195,
      "eval_loss": 0.43277421593666077,
      "eval_rewards/accuracies": 0.6875,
      "eval_rewards/chosen": -2.575838804244995,
      "eval_rewards/margins": 3.574841260910034,
      "eval_rewards/rejected": -6.1506805419921875,
      "eval_runtime": 30.2374,
      "eval_samples_per_second": 29.5,
      "eval_steps_per_second": 3.704,
      "step": 550
    },
    {
      "epoch": 0.4059074023738335,
      "grad_norm": 0.903642475605011,
      "learning_rate": 2.031546713535688e-06,
      "logits/chosen": -2.452147960662842,
      "logits/rejected": -2.799231767654419,
      "logps/chosen": -1.7636197805404663,
      "logps/rejected": -4.704730033874512,
      "loss": 3.2952,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -2.645429849624634,
      "rewards/margins": 4.411664962768555,
      "rewards/rejected": -7.057094573974609,
      "step": 560
    },
    {
      "epoch": 0.4131557488447948,
      "grad_norm": 1.289736032485962,
      "learning_rate": 1.9546418965086444e-06,
      "logits/chosen": -2.556669235229492,
      "logits/rejected": -2.833976984024048,
      "logps/chosen": -1.901815414428711,
      "logps/rejected": -4.644237518310547,
      "loss": 3.3085,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -2.852722644805908,
      "rewards/margins": 4.113633632659912,
      "rewards/rejected": -6.9663567543029785,
      "step": 570
    },
    {
      "epoch": 0.42040409531575607,
      "grad_norm": 1.4252225160598755,
      "learning_rate": 1.8782752820878636e-06,
      "logits/chosen": -2.364348888397217,
      "logits/rejected": -2.9077506065368652,
      "logps/chosen": -2.175013780593872,
      "logps/rejected": -6.0506911277771,
      "loss": 3.2304,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -3.2625205516815186,
      "rewards/margins": 5.813515663146973,
      "rewards/rejected": -9.07603645324707,
      "step": 580
    },
    {
      "epoch": 0.4276524417867174,
      "grad_norm": 1.1662834882736206,
      "learning_rate": 1.8025222349019273e-06,
      "logits/chosen": -2.431143045425415,
      "logits/rejected": -2.8574650287628174,
      "logps/chosen": -2.1578497886657715,
      "logps/rejected": -5.375465393066406,
      "loss": 3.2736,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -3.2367749214172363,
      "rewards/margins": 4.826422691345215,
      "rewards/rejected": -8.063197135925293,
      "step": 590
    },
    {
      "epoch": 0.4349007882576787,
      "grad_norm": 1.6105389595031738,
      "learning_rate": 1.7274575140626318e-06,
      "logits/chosen": -2.3821804523468018,
      "logits/rejected": -2.8510732650756836,
      "logps/chosen": -2.297203540802002,
      "logps/rejected": -5.809553146362305,
      "loss": 3.343,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -3.445805072784424,
      "rewards/margins": 5.268526077270508,
      "rewards/rejected": -8.714330673217773,
      "step": 600
    },
    {
      "epoch": 0.4349007882576787,
      "eval_logits/chosen": -2.515763521194458,
      "eval_logits/rejected": -2.8498146533966064,
      "eval_logps/chosen": -2.3867976665496826,
      "eval_logps/rejected": -5.036753177642822,
      "eval_loss": 0.3846723139286041,
      "eval_rewards/accuracies": 0.75,
      "eval_rewards/chosen": -3.5801963806152344,
      "eval_rewards/margins": 3.974933385848999,
      "eval_rewards/rejected": -7.5551300048828125,
      "eval_runtime": 30.235,
      "eval_samples_per_second": 29.502,
      "eval_steps_per_second": 3.704,
      "step": 600
    },
    {
      "epoch": 0.44214913472864004,
      "grad_norm": 1.5437382459640503,
      "learning_rate": 1.6531551993867717e-06,
      "logits/chosen": -2.5224862098693848,
      "logits/rejected": -2.862809896469116,
      "logps/chosen": -2.504312753677368,
      "logps/rejected": -5.347531318664551,
      "loss": 3.0472,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -3.7564690113067627,
      "rewards/margins": 4.264827728271484,
      "rewards/rejected": -8.021296501159668,
      "step": 610
    },
    {
      "epoch": 0.4493974811996013,
      "grad_norm": 1.5009058713912964,
      "learning_rate": 1.5796886182883053e-06,
      "logits/chosen": -2.4633076190948486,
      "logits/rejected": -2.9186339378356934,
      "logps/chosen": -2.854260206222534,
      "logps/rejected": -6.636805057525635,
      "loss": 3.1013,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -4.281390190124512,
      "rewards/margins": 5.673816680908203,
      "rewards/rejected": -9.955206871032715,
      "step": 620
    },
    {
      "epoch": 0.45664582767056267,
      "grad_norm": 1.2582628726959229,
      "learning_rate": 1.5071302734130488e-06,
      "logits/chosen": -2.5565848350524902,
      "logits/rejected": -2.8856873512268066,
      "logps/chosen": -2.7753074169158936,
      "logps/rejected": -5.89193058013916,
      "loss": 2.7849,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -4.162961483001709,
      "rewards/margins": 4.674934387207031,
      "rewards/rejected": -8.837895393371582,
      "step": 630
    },
    {
      "epoch": 0.46389417414152395,
      "grad_norm": 1.6643486022949219,
      "learning_rate": 1.4355517710873184e-06,
      "logits/chosen": -2.4163150787353516,
      "logits/rejected": -2.8351335525512695,
      "logps/chosen": -2.870678663253784,
      "logps/rejected": -6.636674404144287,
      "loss": 2.6839,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -4.306017875671387,
      "rewards/margins": 5.648994445800781,
      "rewards/rejected": -9.955012321472168,
      "step": 640
    },
    {
      "epoch": 0.4711425206124853,
      "grad_norm": 0.9412183165550232,
      "learning_rate": 1.3650237506511333e-06,
      "logits/chosen": -2.511819362640381,
      "logits/rejected": -2.9430336952209473,
      "logps/chosen": -3.0111937522888184,
      "logps/rejected": -6.41957950592041,
      "loss": 2.884,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -4.516791343688965,
      "rewards/margins": 5.11257791519165,
      "rewards/rejected": -9.62936782836914,
      "step": 650
    },
    {
      "epoch": 0.4711425206124853,
      "eval_logits/chosen": -2.5462071895599365,
      "eval_logits/rejected": -2.890479326248169,
      "eval_logps/chosen": -3.189256429672241,
      "eval_logps/rejected": -6.0319504737854,
      "eval_loss": 0.35537806153297424,
      "eval_rewards/accuracies": 0.8303571343421936,
      "eval_rewards/chosen": -4.7838850021362305,
      "eval_rewards/margins": 4.264040470123291,
      "eval_rewards/rejected": -9.04792594909668,
      "eval_runtime": 30.2531,
      "eval_samples_per_second": 29.485,
      "eval_steps_per_second": 3.702,
      "step": 650
    },
    {
      "epoch": 0.4783908670834466,
      "grad_norm": 2.0148541927337646,
      "learning_rate": 1.2956158147457116e-06,
      "logits/chosen": -2.5816783905029297,
      "logits/rejected": -2.832157850265503,
      "logps/chosen": -2.9875688552856445,
      "logps/rejected": -6.420091152191162,
      "loss": 2.7163,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -4.481352806091309,
      "rewards/margins": 5.14878511428833,
      "rewards/rejected": -9.630138397216797,
      "step": 660
    },
    {
      "epoch": 0.4856392135544079,
      "grad_norm": 2.247664451599121,
      "learning_rate": 1.2273964606240718e-06,
      "logits/chosen": -2.524240255355835,
      "logits/rejected": -2.936284065246582,
      "logps/chosen": -3.2474262714385986,
      "logps/rejected": -6.681307315826416,
      "loss": 2.9501,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -4.871139049530029,
      "rewards/margins": 5.150821685791016,
      "rewards/rejected": -10.021961212158203,
      "step": 670
    },
    {
      "epoch": 0.4928875600253692,
      "grad_norm": 2.2333178520202637,
      "learning_rate": 1.160433012552508e-06,
      "logits/chosen": -2.5046608448028564,
      "logits/rejected": -2.889239549636841,
      "logps/chosen": -3.5039947032928467,
      "logps/rejected": -6.702293395996094,
      "loss": 2.777,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -5.2559919357299805,
      "rewards/margins": 4.797448635101318,
      "rewards/rejected": -10.053441047668457,
      "step": 680
    },
    {
      "epoch": 0.5001359064963306,
      "grad_norm": 2.373650074005127,
      "learning_rate": 1.0947915553696742e-06,
      "logits/chosen": -2.4730420112609863,
      "logits/rejected": -2.9917922019958496,
      "logps/chosen": -3.579982042312622,
      "logps/rejected": -7.557694435119629,
      "loss": 2.673,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -5.369973182678223,
      "rewards/margins": 5.9665679931640625,
      "rewards/rejected": -11.336541175842285,
      "step": 690
    },
    {
      "epoch": 0.5073842529672918,
      "grad_norm": 5.8343729972839355,
      "learning_rate": 1.0305368692688175e-06,
      "logits/chosen": -2.479050397872925,
      "logits/rejected": -2.9097132682800293,
      "logps/chosen": -3.8712658882141113,
      "logps/rejected": -7.749911308288574,
      "loss": 2.6632,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -5.806898593902588,
      "rewards/margins": 5.817968845367432,
      "rewards/rejected": -11.62486743927002,
      "step": 700
    },
    {
      "epoch": 0.5073842529672918,
      "eval_logits/chosen": -2.528568744659424,
      "eval_logits/rejected": -2.898803949356079,
      "eval_logps/chosen": -3.8546855449676514,
      "eval_logps/rejected": -6.8783745765686035,
      "eval_loss": 0.32792916893959045,
      "eval_rewards/accuracies": 0.8482142686843872,
      "eval_rewards/chosen": -5.782027721405029,
      "eval_rewards/margins": 4.535533428192139,
      "eval_rewards/rejected": -10.317561149597168,
      "eval_runtime": 30.2435,
      "eval_samples_per_second": 29.494,
      "eval_steps_per_second": 3.703,
      "step": 700
    },
    {
      "epoch": 0.5146325994382531,
      "grad_norm": 2.118319034576416,
      "learning_rate": 9.677323658675594e-07,
      "logits/chosen": -2.5124125480651855,
      "logits/rejected": -2.872951030731201,
      "logps/chosen": -3.9242005348205566,
      "logps/rejected": -7.064782619476318,
      "loss": 2.6968,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -5.886300086975098,
      "rewards/margins": 4.710873603820801,
      "rewards/rejected": -10.597173690795898,
      "step": 710
    },
    {
      "epoch": 0.5218809459092144,
      "grad_norm": 2.57857346534729,
      "learning_rate": 9.064400256282757e-07,
      "logits/chosen": -2.442990779876709,
      "logits/rejected": -2.9133260250091553,
      "logps/chosen": -4.32869291305542,
      "logps/rejected": -7.873679161071777,
      "loss": 2.6929,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -6.493039608001709,
      "rewards/margins": 5.317479610443115,
      "rewards/rejected": -11.810518264770508,
      "step": 720
    },
    {
      "epoch": 0.5291292923801758,
      "grad_norm": 3.1820855140686035,
      "learning_rate": 8.467203366908708e-07,
      "logits/chosen": -2.4702260494232178,
      "logits/rejected": -2.948544979095459,
      "logps/chosen": -4.033299922943115,
      "logps/rejected": -7.3781843185424805,
      "loss": 2.6089,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -6.0499491691589355,
      "rewards/margins": 5.017327785491943,
      "rewards/rejected": -11.067277908325195,
      "step": 730
    },
    {
      "epoch": 0.5363776388511371,
      "grad_norm": 1.360353708267212,
      "learning_rate": 7.886322351782782e-07,
      "logits/chosen": -2.4439663887023926,
      "logits/rejected": -2.93951678276062,
      "logps/chosen": -3.9287734031677246,
      "logps/rejected": -8.031271934509277,
      "loss": 2.3252,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -5.893160820007324,
      "rewards/margins": 6.15374755859375,
      "rewards/rejected": -12.046907424926758,
      "step": 740
    },
    {
      "epoch": 0.5436259853220984,
      "grad_norm": 2.4719009399414062,
      "learning_rate": 7.322330470336314e-07,
      "logits/chosen": -2.4842166900634766,
      "logits/rejected": -2.930525302886963,
      "logps/chosen": -4.090015888214111,
      "logps/rejected": -7.851285457611084,
      "loss": 2.4229,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -6.135023593902588,
      "rewards/margins": 5.641904830932617,
      "rewards/rejected": -11.776927947998047,
      "step": 750
    },
    {
      "epoch": 0.5436259853220984,
      "eval_logits/chosen": -2.5460832118988037,
      "eval_logits/rejected": -2.9118590354919434,
      "eval_logps/chosen": -4.3249192237854,
      "eval_logps/rejected": -7.5089521408081055,
      "eval_loss": 0.30500930547714233,
      "eval_rewards/accuracies": 0.8571428656578064,
      "eval_rewards/chosen": -6.4873785972595215,
      "eval_rewards/margins": 4.7760491371154785,
      "eval_rewards/rejected": -11.263427734375,
      "eval_runtime": 30.2309,
      "eval_samples_per_second": 29.506,
      "eval_steps_per_second": 3.705,
      "step": 750
    },
    {
      "epoch": 0.5508743317930597,
      "grad_norm": 1.5037100315093994,
      "learning_rate": 6.775784314464717e-07,
      "logits/chosen": -2.536068916320801,
      "logits/rejected": -2.9130196571350098,
      "logps/chosen": -4.38153076171875,
      "logps/rejected": -7.925049781799316,
      "loss": 2.2663,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -6.572295188903809,
      "rewards/margins": 5.315279960632324,
      "rewards/rejected": -11.887575149536133,
      "step": 760
    },
    {
      "epoch": 0.5581226782640211,
      "grad_norm": 2.590315818786621,
      "learning_rate": 6.247223259238511e-07,
      "logits/chosen": -2.5102131366729736,
      "logits/rejected": -2.9389328956604004,
      "logps/chosen": -4.440621852874756,
      "logps/rejected": -7.853024482727051,
      "loss": 2.5056,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -6.660933017730713,
      "rewards/margins": 5.118602752685547,
      "rewards/rejected": -11.779535293579102,
      "step": 770
    },
    {
      "epoch": 0.5653710247349824,
      "grad_norm": 1.7628798484802246,
      "learning_rate": 5.737168930605272e-07,
      "logits/chosen": -2.5515689849853516,
      "logits/rejected": -2.930510997772217,
      "logps/chosen": -4.128845691680908,
      "logps/rejected": -7.9085845947265625,
      "loss": 2.2382,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -6.193268775939941,
      "rewards/margins": 5.669609069824219,
      "rewards/rejected": -11.862876892089844,
      "step": 780
    },
    {
      "epoch": 0.5726193712059436,
      "grad_norm": 3.5818936824798584,
      "learning_rate": 5.24612469060774e-07,
      "logits/chosen": -2.4889330863952637,
      "logits/rejected": -2.87908935546875,
      "logps/chosen": -4.441933631896973,
      "logps/rejected": -8.435145378112793,
      "loss": 2.2271,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -6.662900447845459,
      "rewards/margins": 5.9898176193237305,
      "rewards/rejected": -12.652718544006348,
      "step": 790
    },
    {
      "epoch": 0.5798677176769049,
      "grad_norm": 4.387265682220459,
      "learning_rate": 4.774575140626317e-07,
      "logits/chosen": -2.434931516647339,
      "logits/rejected": -2.9189987182617188,
      "logps/chosen": -4.684146404266357,
      "logps/rejected": -8.587119102478027,
      "loss": 2.2777,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -7.026219844818115,
      "rewards/margins": 5.854458808898926,
      "rewards/rejected": -12.880678176879883,
      "step": 800
    },
    {
      "epoch": 0.5798677176769049,
      "eval_logits/chosen": -2.5454742908477783,
      "eval_logits/rejected": -2.9115586280822754,
      "eval_logps/chosen": -4.826877117156982,
      "eval_logps/rejected": -8.15951919555664,
      "eval_loss": 0.2918218672275543,
      "eval_rewards/accuracies": 0.875,
      "eval_rewards/chosen": -7.2403154373168945,
      "eval_rewards/margins": 4.998963356018066,
      "eval_rewards/rejected": -12.239279747009277,
      "eval_runtime": 30.2636,
      "eval_samples_per_second": 29.474,
      "eval_steps_per_second": 3.701,
      "step": 800
    },
    {
      "epoch": 0.5871160641478663,
      "grad_norm": 3.7356505393981934,
      "learning_rate": 4.3229856431359516e-07,
      "logits/chosen": -2.456073522567749,
      "logits/rejected": -2.893204927444458,
      "logps/chosen": -4.553319931030273,
      "logps/rejected": -8.365021705627441,
      "loss": 2.5536,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -6.829980373382568,
      "rewards/margins": 5.717551231384277,
      "rewards/rejected": -12.547531127929688,
      "step": 810
    },
    {
      "epoch": 0.5943644106188276,
      "grad_norm": 2.5566742420196533,
      "learning_rate": 3.891801862449629e-07,
      "logits/chosen": -2.439631938934326,
      "logits/rejected": -2.931784152984619,
      "logps/chosen": -4.470883846282959,
      "logps/rejected": -8.863119125366211,
      "loss": 2.279,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -6.706325531005859,
      "rewards/margins": 6.588353633880615,
      "rewards/rejected": -13.294679641723633,
      "step": 820
    },
    {
      "epoch": 0.6016127570897889,
      "grad_norm": 3.1389355659484863,
      "learning_rate": 3.481449324901412e-07,
      "logits/chosen": -2.4623847007751465,
      "logits/rejected": -2.9169857501983643,
      "logps/chosen": -4.818366050720215,
      "logps/rejected": -8.847421646118164,
      "loss": 2.1291,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -7.227549076080322,
      "rewards/margins": 6.043582439422607,
      "rewards/rejected": -13.271130561828613,
      "step": 830
    },
    {
      "epoch": 0.6088611035607502,
      "grad_norm": 2.467841625213623,
      "learning_rate": 3.092332998903416e-07,
      "logits/chosen": -2.5648622512817383,
      "logits/rejected": -2.8750557899475098,
      "logps/chosen": -4.850269794464111,
      "logps/rejected": -8.536476135253906,
      "loss": 2.2548,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -7.275404930114746,
      "rewards/margins": 5.529307842254639,
      "rewards/rejected": -12.804713249206543,
      "step": 840
    },
    {
      "epoch": 0.6161094500317115,
      "grad_norm": 2.4988455772399902,
      "learning_rate": 2.7248368952908055e-07,
      "logits/chosen": -2.439044952392578,
      "logits/rejected": -2.8845443725585938,
      "logps/chosen": -4.711085319519043,
      "logps/rejected": -8.678643226623535,
      "loss": 2.2852,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -7.066628456115723,
      "rewards/margins": 5.951335906982422,
      "rewards/rejected": -13.017964363098145,
      "step": 850
    },
    {
      "epoch": 0.6161094500317115,
      "eval_logits/chosen": -2.5347232818603516,
      "eval_logits/rejected": -2.8995745182037354,
      "eval_logps/chosen": -4.948782444000244,
      "eval_logps/rejected": -8.324250221252441,
      "eval_loss": 0.2818227708339691,
      "eval_rewards/accuracies": 0.875,
      "eval_rewards/chosen": -7.423173427581787,
      "eval_rewards/margins": 5.063201427459717,
      "eval_rewards/rejected": -12.48637580871582,
      "eval_runtime": 30.2383,
      "eval_samples_per_second": 29.499,
      "eval_steps_per_second": 3.704,
      "step": 850
    },
    {
      "epoch": 0.6233577965026729,
      "grad_norm": 3.6566624641418457,
      "learning_rate": 2.3793236883495164e-07,
      "logits/chosen": -2.522355556488037,
      "logits/rejected": -2.9092490673065186,
      "logps/chosen": -5.117705345153809,
      "logps/rejected": -8.789793014526367,
      "loss": 2.2219,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -7.676558017730713,
      "rewards/margins": 5.508131504058838,
      "rewards/rejected": -13.18468952178955,
      "step": 860
    },
    {
      "epoch": 0.6306061429736342,
      "grad_norm": 3.4471309185028076,
      "learning_rate": 2.0561343579004716e-07,
      "logits/chosen": -2.478935480117798,
      "logits/rejected": -2.8984580039978027,
      "logps/chosen": -4.6364336013793945,
      "logps/rejected": -8.75157356262207,
      "loss": 2.0656,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -6.954649925231934,
      "rewards/margins": 6.1727094650268555,
      "rewards/rejected": -13.127359390258789,
      "step": 870
    },
    {
      "epoch": 0.6378544894445954,
      "grad_norm": 5.826159477233887,
      "learning_rate": 1.7555878527937164e-07,
      "logits/chosen": -2.543975830078125,
      "logits/rejected": -2.8908252716064453,
      "logps/chosen": -4.740494728088379,
      "logps/rejected": -8.813726425170898,
      "loss": 2.2936,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -7.110742092132568,
      "rewards/margins": 6.1098480224609375,
      "rewards/rejected": -13.220590591430664,
      "step": 880
    },
    {
      "epoch": 0.6451028359155567,
      "grad_norm": 1.7311068773269653,
      "learning_rate": 1.4779807761443638e-07,
      "logits/chosen": -2.4625258445739746,
      "logits/rejected": -2.8965325355529785,
      "logps/chosen": -5.210020542144775,
      "logps/rejected": -9.470269203186035,
      "loss": 2.1442,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -7.815030097961426,
      "rewards/margins": 6.3903727531433105,
      "rewards/rejected": -14.205403327941895,
      "step": 890
    },
    {
      "epoch": 0.6523511823865181,
      "grad_norm": 3.3928353786468506,
      "learning_rate": 1.223587092621162e-07,
      "logits/chosen": -2.5978572368621826,
      "logits/rejected": -2.910475492477417,
      "logps/chosen": -5.280083656311035,
      "logps/rejected": -8.648152351379395,
      "loss": 2.6127,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -7.9201250076293945,
      "rewards/margins": 5.052102088928223,
      "rewards/rejected": -12.972227096557617,
      "step": 900
    },
    {
      "epoch": 0.6523511823865181,
      "eval_logits/chosen": -2.528658628463745,
      "eval_logits/rejected": -2.896522045135498,
      "eval_logps/chosen": -5.151096820831299,
      "eval_logps/rejected": -8.659164428710938,
      "eval_loss": 0.27813127636909485,
      "eval_rewards/accuracies": 0.9017857313156128,
      "eval_rewards/chosen": -7.7266459465026855,
      "eval_rewards/margins": 5.262101650238037,
      "eval_rewards/rejected": -12.988746643066406,
      "eval_runtime": 30.2603,
      "eval_samples_per_second": 29.478,
      "eval_steps_per_second": 3.701,
      "step": 900
    }
  ],
  "logging_steps": 10,
  "max_steps": 1000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.342232258554626e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}