|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.10872519706441967,
  "eval_steps": 50,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.007248346470961312,
      "grad_norm": 0.07201674580574036,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": -1.9793262481689453,
      "logits/rejected": -2.5381760597229004,
      "logps/chosen": -0.28126341104507446,
      "logps/rejected": -0.3779803514480591,
      "loss": 7.3904,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.4218950867652893,
      "rewards/margins": 0.14507544040679932,
      "rewards/rejected": -0.5669704675674438,
      "step": 10
    },
    {
      "epoch": 0.014496692941922623,
      "grad_norm": 0.07562297582626343,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": -2.0567643642425537,
      "logits/rejected": -2.4989147186279297,
      "logps/chosen": -0.27690139412879944,
      "logps/rejected": -0.33544114232063293,
      "loss": 7.3756,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.41535210609436035,
      "rewards/margins": 0.08780960738658905,
      "rewards/rejected": -0.5031617283821106,
      "step": 20
    },
    {
      "epoch": 0.021745039412883936,
      "grad_norm": 0.09685570746660233,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": -2.10174298286438,
      "logits/rejected": -2.378197431564331,
      "logps/chosen": -0.26717427372932434,
      "logps/rejected": -0.30565372109413147,
      "loss": 7.451,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.4007614254951477,
      "rewards/margins": 0.05771917849779129,
      "rewards/rejected": -0.4584805369377136,
      "step": 30
    },
    {
      "epoch": 0.028993385883845247,
      "grad_norm": 0.08213861286640167,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": -2.1633317470550537,
      "logits/rejected": -2.387866497039795,
      "logps/chosen": -0.27634260058403015,
      "logps/rejected": -0.37035584449768066,
      "loss": 7.3892,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.4145139157772064,
      "rewards/margins": 0.14101983606815338,
      "rewards/rejected": -0.555533766746521,
      "step": 40
    },
    {
      "epoch": 0.03624173235480656,
      "grad_norm": 0.08846044540405273,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": -2.1402599811553955,
      "logits/rejected": -2.4459526538848877,
      "logps/chosen": -0.2535383999347687,
      "logps/rejected": -0.3090876042842865,
      "loss": 7.5171,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.3803076148033142,
      "rewards/margins": 0.08332376182079315,
      "rewards/rejected": -0.4636313319206238,
      "step": 50
    },
    {
      "epoch": 0.03624173235480656,
      "eval_logits/chosen": -2.1165692806243896,
      "eval_logits/rejected": -2.476428747177124,
      "eval_logps/chosen": -0.2828062176704407,
      "eval_logps/rejected": -0.3432886600494385,
      "eval_loss": 0.9120001792907715,
      "eval_rewards/accuracies": 0.5089285969734192,
      "eval_rewards/chosen": -0.4242093861103058,
      "eval_rewards/margins": 0.09072363376617432,
      "eval_rewards/rejected": -0.5149329900741577,
      "eval_runtime": 30.971,
      "eval_samples_per_second": 28.801,
      "eval_steps_per_second": 3.616,
      "step": 50
    },
    {
      "epoch": 0.04349007882576787,
      "grad_norm": 0.11753705143928528,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": -2.0575368404388428,
      "logits/rejected": -2.5456700325012207,
      "logps/chosen": -0.24622221291065216,
      "logps/rejected": -0.32402220368385315,
      "loss": 7.3926,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.36933332681655884,
      "rewards/margins": 0.11669999361038208,
      "rewards/rejected": -0.48603329062461853,
      "step": 60
    },
    {
      "epoch": 0.05073842529672919,
      "grad_norm": 0.09996571391820908,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": -1.9278606176376343,
      "logits/rejected": -2.4620182514190674,
      "logps/chosen": -0.2596542239189148,
      "logps/rejected": -0.3665880560874939,
      "loss": 7.185,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.3894812762737274,
      "rewards/margins": 0.160400852560997,
      "rewards/rejected": -0.5498821139335632,
      "step": 70
    },
    {
      "epoch": 0.057986771767690494,
      "grad_norm": 0.07287321239709854,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": -2.078031063079834,
      "logits/rejected": -2.461479663848877,
      "logps/chosen": -0.24513795971870422,
      "logps/rejected": -0.3448730707168579,
      "loss": 7.365,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.36770695447921753,
      "rewards/margins": 0.14960262179374695,
      "rewards/rejected": -0.5173095464706421,
      "step": 80
    },
    {
      "epoch": 0.0652351182386518,
      "grad_norm": 0.09656044095754623,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": -1.9979238510131836,
      "logits/rejected": -2.4381277561187744,
      "logps/chosen": -0.24047240614891052,
      "logps/rejected": -0.3277527987957001,
      "loss": 7.2664,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.36070865392684937,
      "rewards/margins": 0.13092057406902313,
      "rewards/rejected": -0.4916292130947113,
      "step": 90
    },
    {
      "epoch": 0.07248346470961312,
      "grad_norm": 0.08125138282775879,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": -1.8948112726211548,
      "logits/rejected": -2.4755520820617676,
      "logps/chosen": -0.20189261436462402,
      "logps/rejected": -0.29732149839401245,
      "loss": 7.2542,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.30283889174461365,
      "rewards/margins": 0.14314329624176025,
      "rewards/rejected": -0.44598227739334106,
      "step": 100
    },
    {
      "epoch": 0.07248346470961312,
      "eval_logits/chosen": -2.12322735786438,
      "eval_logits/rejected": -2.481174945831299,
      "eval_logps/chosen": -0.2438412606716156,
      "eval_logps/rejected": -0.3260033428668976,
      "eval_loss": 0.891861081123352,
      "eval_rewards/accuracies": 0.5625,
      "eval_rewards/chosen": -0.3657619059085846,
      "eval_rewards/margins": 0.12324309349060059,
      "eval_rewards/rejected": -0.48900502920150757,
      "eval_runtime": 30.3299,
      "eval_samples_per_second": 29.41,
      "eval_steps_per_second": 3.693,
      "step": 100
    },
    {
      "epoch": 0.07973181118057443,
      "grad_norm": 0.05962231010198593,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": -2.1134355068206787,
      "logits/rejected": -2.516538381576538,
      "logps/chosen": -0.237023264169693,
      "logps/rejected": -0.31476154923439026,
      "loss": 7.1749,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.3555349111557007,
      "rewards/margins": 0.11660744249820709,
      "rewards/rejected": -0.4721423089504242,
      "step": 110
    },
    {
      "epoch": 0.08698015765153574,
      "grad_norm": 0.06015922501683235,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": -2.0215041637420654,
      "logits/rejected": -2.4902031421661377,
      "logps/chosen": -0.1890055537223816,
      "logps/rejected": -0.3192065358161926,
      "loss": 7.142,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.2835083603858948,
      "rewards/margins": 0.19530144333839417,
      "rewards/rejected": -0.47880974411964417,
      "step": 120
    },
    {
      "epoch": 0.09422850412249706,
      "grad_norm": 0.06430571526288986,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": -2.0684752464294434,
      "logits/rejected": -2.510018825531006,
      "logps/chosen": -0.2073744535446167,
      "logps/rejected": -0.3121300935745239,
      "loss": 7.1438,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.31106168031692505,
      "rewards/margins": 0.15713343024253845,
      "rewards/rejected": -0.4681951403617859,
      "step": 130
    },
    {
      "epoch": 0.10147685059345837,
      "grad_norm": 0.08829955011606216,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": -2.026458263397217,
      "logits/rejected": -2.496157646179199,
      "logps/chosen": -0.19946983456611633,
      "logps/rejected": -0.32050156593322754,
      "loss": 7.1196,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.2992047667503357,
      "rewards/margins": 0.18154758214950562,
      "rewards/rejected": -0.4807523787021637,
      "step": 140
    },
    {
      "epoch": 0.10872519706441967,
      "grad_norm": 0.09773921221494675,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": -2.101029872894287,
      "logits/rejected": -2.5849032402038574,
      "logps/chosen": -0.18889756500720978,
      "logps/rejected": -0.36427801847457886,
      "loss": 7.1227,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.28334635496139526,
      "rewards/margins": 0.26307064294815063,
      "rewards/rejected": -0.5464169979095459,
      "step": 150
    },
    {
      "epoch": 0.10872519706441967,
      "eval_logits/chosen": -2.1945455074310303,
      "eval_logits/rejected": -2.559415578842163,
      "eval_logps/chosen": -0.22209034860134125,
      "eval_logps/rejected": -0.32476040720939636,
      "eval_loss": 0.8760393261909485,
      "eval_rewards/accuracies": 0.5803571343421936,
      "eval_rewards/chosen": -0.33313554525375366,
      "eval_rewards/margins": 0.15400508046150208,
      "eval_rewards/rejected": -0.48714062571525574,
      "eval_runtime": 30.3484,
      "eval_samples_per_second": 29.392,
      "eval_steps_per_second": 3.69,
      "step": 150
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.563909198850294e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}