{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.14496692941922623,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.007248346470961312,
      "grad_norm": 0.07235438376665115,
      "learning_rate": 4.998766400914329e-06,
      "logits/chosen": -1.9785633087158203,
      "logits/rejected": -2.5380234718322754,
      "logps/chosen": -0.28137442469596863,
      "logps/rejected": -0.37793582677841187,
      "loss": 7.3908,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.42206162214279175,
      "rewards/margins": 0.14484205842018127,
      "rewards/rejected": -0.5669037103652954,
      "step": 10
    },
    {
      "epoch": 0.014496692941922623,
      "grad_norm": 0.07455909997224808,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": -2.0576319694519043,
      "logits/rejected": -2.499069929122925,
      "logps/chosen": -0.2769497036933899,
      "logps/rejected": -0.33560773730278015,
      "loss": 7.3753,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.41542449593544006,
      "rewards/margins": 0.08798708021640778,
      "rewards/rejected": -0.503411591053009,
      "step": 20
    },
    {
      "epoch": 0.021745039412883936,
      "grad_norm": 0.09561269730329514,
      "learning_rate": 4.9889049115077e-06,
      "logits/chosen": -2.1018643379211426,
      "logits/rejected": -2.377673625946045,
      "logps/chosen": -0.2673099935054779,
      "logps/rejected": -0.3057115972042084,
      "loss": 7.4516,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.40096497535705566,
      "rewards/margins": 0.05760239437222481,
      "rewards/rejected": -0.45856744050979614,
      "step": 30
    },
    {
      "epoch": 0.028993385883845247,
      "grad_norm": 0.08169445395469666,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": -2.1627352237701416,
      "logits/rejected": -2.3873391151428223,
      "logps/chosen": -0.27636194229125977,
      "logps/rejected": -0.3703404664993286,
      "loss": 7.3888,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.4145428538322449,
      "rewards/margins": 0.14096775650978088,
      "rewards/rejected": -0.5555106401443481,
      "step": 40
    },
    {
      "epoch": 0.03624173235480656,
      "grad_norm": 0.08877147734165192,
      "learning_rate": 4.9692208514878445e-06,
      "logits/chosen": -2.1400368213653564,
      "logits/rejected": -2.44627046585083,
      "logps/chosen": -0.25384199619293213,
      "logps/rejected": -0.309225857257843,
      "loss": 7.518,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.3807629942893982,
      "rewards/margins": 0.08307582885026932,
      "rewards/rejected": -0.4638388752937317,
      "step": 50
    },
    {
      "epoch": 0.03624173235480656,
      "eval_logits/chosen": -2.1149837970733643,
      "eval_logits/rejected": -2.4759538173675537,
      "eval_logps/chosen": -0.28288090229034424,
      "eval_logps/rejected": -0.3434317111968994,
      "eval_loss": 0.9120966196060181,
      "eval_rewards/accuracies": 0.5089285969734192,
      "eval_rewards/chosen": -0.42432135343551636,
      "eval_rewards/margins": 0.09082622081041336,
      "eval_rewards/rejected": -0.5151475667953491,
      "eval_runtime": 30.2473,
      "eval_samples_per_second": 29.49,
      "eval_steps_per_second": 3.703,
      "step": 50
    },
    {
      "epoch": 0.04349007882576787,
      "grad_norm": 0.11774340271949768,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": -2.0567967891693115,
      "logits/rejected": -2.545668125152588,
      "logps/chosen": -0.2460743933916092,
      "logps/rejected": -0.32390663027763367,
      "loss": 7.3929,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.3691115975379944,
      "rewards/margins": 0.1167483925819397,
      "rewards/rejected": -0.4858599603176117,
      "step": 60
    },
    {
      "epoch": 0.05073842529672919,
      "grad_norm": 0.10012238472700119,
      "learning_rate": 4.939791904846869e-06,
      "logits/chosen": -1.9286606311798096,
      "logits/rejected": -2.4620895385742188,
      "logps/chosen": -0.25992274284362793,
      "logps/rejected": -0.3665553629398346,
      "loss": 7.1863,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.3898841440677643,
      "rewards/margins": 0.15994893014431,
      "rewards/rejected": -0.5498330593109131,
      "step": 70
    },
    {
      "epoch": 0.057986771767690494,
      "grad_norm": 0.07387609034776688,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": -2.0778262615203857,
      "logits/rejected": -2.4614813327789307,
      "logps/chosen": -0.24521782994270325,
      "logps/rejected": -0.3450419306755066,
      "loss": 7.3662,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.3678267002105713,
      "rewards/margins": 0.1497361809015274,
      "rewards/rejected": -0.5175628662109375,
      "step": 80
    },
    {
      "epoch": 0.0652351182386518,
      "grad_norm": 0.09718403220176697,
      "learning_rate": 4.900734214192358e-06,
      "logits/chosen": -1.998186707496643,
      "logits/rejected": -2.4382071495056152,
      "logps/chosen": -0.24080593883991241,
      "logps/rejected": -0.3280099332332611,
      "loss": 7.268,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.36120888590812683,
      "rewards/margins": 0.13080602884292603,
      "rewards/rejected": -0.49201488494873047,
      "step": 90
    },
    {
      "epoch": 0.07248346470961312,
      "grad_norm": 0.08059138059616089,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": -1.894728422164917,
      "logits/rejected": -2.475407838821411,
      "logps/chosen": -0.2019994705915451,
      "logps/rejected": -0.29733040928840637,
      "loss": 7.2544,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.30299919843673706,
      "rewards/margins": 0.14299637079238892,
      "rewards/rejected": -0.445995569229126,
      "step": 100
    },
    {
      "epoch": 0.07248346470961312,
      "eval_logits/chosen": -2.1233773231506348,
      "eval_logits/rejected": -2.4810428619384766,
      "eval_logps/chosen": -0.24400465190410614,
      "eval_logps/rejected": -0.3260345160961151,
      "eval_loss": 0.8920583724975586,
      "eval_rewards/accuracies": 0.5625,
      "eval_rewards/chosen": -0.3660070598125458,
      "eval_rewards/margins": 0.12304472178220749,
      "eval_rewards/rejected": -0.4890517592430115,
      "eval_runtime": 30.28,
      "eval_samples_per_second": 29.458,
      "eval_steps_per_second": 3.699,
      "step": 100
    },
    {
      "epoch": 0.07973181118057443,
      "grad_norm": 0.060006480664014816,
      "learning_rate": 4.852201922385564e-06,
      "logits/chosen": -2.1128883361816406,
      "logits/rejected": -2.5161118507385254,
      "logps/chosen": -0.23731942474842072,
      "logps/rejected": -0.31481316685676575,
      "loss": 7.1762,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.3559790849685669,
      "rewards/margins": 0.11624068021774292,
      "rewards/rejected": -0.4722197651863098,
      "step": 110
    },
    {
      "epoch": 0.08698015765153574,
      "grad_norm": 0.0602690726518631,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": -2.0212602615356445,
      "logits/rejected": -2.4894890785217285,
      "logps/chosen": -0.18956038355827332,
      "logps/rejected": -0.31935763359069824,
      "loss": 7.1442,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.2843405604362488,
      "rewards/margins": 0.19469590485095978,
      "rewards/rejected": -0.47903648018836975,
      "step": 120
    },
    {
      "epoch": 0.09422850412249706,
      "grad_norm": 0.06425223499536514,
      "learning_rate": 4.794386564209953e-06,
      "logits/chosen": -2.06717848777771,
      "logits/rejected": -2.5086405277252197,
      "logps/chosen": -0.2075866460800171,
      "logps/rejected": -0.3124083876609802,
      "loss": 7.1449,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.31137990951538086,
      "rewards/margins": 0.15723267197608948,
      "rewards/rejected": -0.46861258149147034,
      "step": 130
    },
    {
      "epoch": 0.10147685059345837,
      "grad_norm": 0.08793757855892181,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": -2.0242953300476074,
      "logits/rejected": -2.4943737983703613,
      "logps/chosen": -0.19989363849163055,
      "logps/rejected": -0.32057636976242065,
      "loss": 7.122,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.29984045028686523,
      "rewards/margins": 0.18102414906024933,
      "rewards/rejected": -0.48086461424827576,
      "step": 140
    },
    {
      "epoch": 0.10872519706441967,
      "grad_norm": 0.09698841720819473,
      "learning_rate": 4.72751631047092e-06,
      "logits/chosen": -2.098658800125122,
      "logits/rejected": -2.582111120223999,
      "logps/chosen": -0.18897351622581482,
      "logps/rejected": -0.363709419965744,
      "loss": 7.1253,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.28346025943756104,
      "rewards/margins": 0.2621038556098938,
      "rewards/rejected": -0.5455641150474548,
      "step": 150
    },
    {
      "epoch": 0.10872519706441967,
      "eval_logits/chosen": -2.1921768188476562,
      "eval_logits/rejected": -2.556370973587036,
      "eval_logps/chosen": -0.22214026749134064,
      "eval_logps/rejected": -0.3240993022918701,
      "eval_loss": 0.8764163851737976,
      "eval_rewards/accuracies": 0.5803571343421936,
      "eval_rewards/chosen": -0.33321040868759155,
      "eval_rewards/margins": 0.15293852984905243,
      "eval_rewards/rejected": -0.48614898324012756,
      "eval_runtime": 30.273,
      "eval_samples_per_second": 29.465,
      "eval_steps_per_second": 3.7,
      "step": 150
    },
    {
      "epoch": 0.11597354353538099,
      "grad_norm": 0.09119638800621033,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": -2.2321066856384277,
      "logits/rejected": -2.5475690364837646,
      "logps/chosen": -0.19602122902870178,
      "logps/rejected": -0.27682799100875854,
      "loss": 7.0307,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.2940318286418915,
      "rewards/margins": 0.12121014297008514,
      "rewards/rejected": -0.4152420163154602,
      "step": 160
    },
    {
      "epoch": 0.1232218900063423,
      "grad_norm": 0.11496366560459137,
      "learning_rate": 4.65185506750986e-06,
      "logits/chosen": -2.1506271362304688,
      "logits/rejected": -2.506520986557007,
      "logps/chosen": -0.18018664419651031,
      "logps/rejected": -0.3084454834461212,
      "loss": 7.1652,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.27028003334999084,
      "rewards/margins": 0.19238826632499695,
      "rewards/rejected": -0.462668240070343,
      "step": 170
    },
    {
      "epoch": 0.1304702364773036,
      "grad_norm": 0.09201692789793015,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": -2.285245895385742,
      "logits/rejected": -2.573941707611084,
      "logps/chosen": -0.1844937801361084,
      "logps/rejected": -0.3112415075302124,
      "loss": 7.0022,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.2767406404018402,
      "rewards/margins": 0.1901216208934784,
      "rewards/rejected": -0.4668622612953186,
      "step": 180
    },
    {
      "epoch": 0.13771858294826492,
      "grad_norm": 0.13191230595111847,
      "learning_rate": 4.567701435686405e-06,
      "logits/chosen": -2.265538454055786,
      "logits/rejected": -2.569638252258301,
      "logps/chosen": -0.21122264862060547,
      "logps/rejected": -0.3469196856021881,
      "loss": 7.004,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.3168340027332306,
      "rewards/margins": 0.20354552567005157,
      "rewards/rejected": -0.5203795433044434,
      "step": 190
    },
    {
      "epoch": 0.14496692941922623,
      "grad_norm": 0.1503789722919464,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": -2.2021026611328125,
      "logits/rejected": -2.655961513519287,
      "logps/chosen": -0.19664816558361053,
      "logps/rejected": -0.34679359197616577,
      "loss": 7.0539,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.2949722409248352,
      "rewards/margins": 0.22521813213825226,
      "rewards/rejected": -0.5201903581619263,
      "step": 200
    },
    {
      "epoch": 0.14496692941922623,
      "eval_logits/chosen": -2.325611114501953,
      "eval_logits/rejected": -2.715676784515381,
      "eval_logps/chosen": -0.2261900007724762,
      "eval_logps/rejected": -0.3570065200328827,
      "eval_loss": 0.8565592169761658,
      "eval_rewards/accuracies": 0.5714285969734192,
      "eval_rewards/chosen": -0.3392849862575531,
      "eval_rewards/margins": 0.19622473418712616,
      "eval_rewards/rejected": -0.5355097055435181,
      "eval_runtime": 30.2681,
      "eval_samples_per_second": 29.47,
      "eval_steps_per_second": 3.7,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 1000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.387465726191206e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}