phi3m0128-wds-0.7-kendall-onof-ofif-corr-max-2-simpo-max1500-default/checkpoint-50/trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.04594004823705065,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00918800964741013,
      "grad_norm": 0.036612071096897125,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 15.01579761505127,
      "logits/rejected": 15.359031677246094,
      "logps/chosen": -0.2681262791156769,
      "logps/rejected": -0.31947994232177734,
      "loss": 0.9551,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.40218934416770935,
      "rewards/margins": 0.07703053951263428,
      "rewards/rejected": -0.479219913482666,
      "step": 10
    },
    {
      "epoch": 0.01837601929482026,
      "grad_norm": 0.05575725808739662,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 14.570712089538574,
      "logits/rejected": 15.321355819702148,
      "logps/chosen": -0.2867889404296875,
      "logps/rejected": -0.3514837622642517,
      "loss": 0.923,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.43018341064453125,
      "rewards/margins": 0.09704220294952393,
      "rewards/rejected": -0.5272256135940552,
      "step": 20
    },
    {
      "epoch": 0.02756402894223039,
      "grad_norm": 0.0492466576397419,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 14.748420715332031,
      "logits/rejected": 14.969354629516602,
      "logps/chosen": -0.28405922651290894,
      "logps/rejected": -0.32855403423309326,
      "loss": 0.9357,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.426088809967041,
      "rewards/margins": 0.06674225628376007,
      "rewards/rejected": -0.4928310811519623,
      "step": 30
    },
    {
      "epoch": 0.03675203858964052,
      "grad_norm": 0.05719422921538353,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 14.28278923034668,
      "logits/rejected": 14.76964282989502,
      "logps/chosen": -0.27940627932548523,
      "logps/rejected": -0.3408831059932709,
      "loss": 0.9215,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.41910940408706665,
      "rewards/margins": 0.09221524000167847,
      "rewards/rejected": -0.5113246440887451,
      "step": 40
    },
    {
      "epoch": 0.04594004823705065,
      "grad_norm": 0.06247895210981369,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 14.943578720092773,
      "logits/rejected": 14.936178207397461,
      "logps/chosen": -0.2819541394710541,
      "logps/rejected": -0.3245392441749573,
      "loss": 0.9464,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.4229312539100647,
      "rewards/margins": 0.06387762725353241,
      "rewards/rejected": -0.4868088662624359,
      "step": 50
    },
    {
      "epoch": 0.04594004823705065,
      "eval_logits/chosen": 14.7594575881958,
      "eval_logits/rejected": 15.193694114685059,
      "eval_logps/chosen": -0.2807807922363281,
      "eval_logps/rejected": -0.36209535598754883,
      "eval_loss": 0.9397181868553162,
      "eval_rewards/accuracies": 0.5681818127632141,
      "eval_rewards/chosen": -0.4211711883544922,
      "eval_rewards/margins": 0.12197184562683105,
      "eval_rewards/rejected": -0.5431429743766785,
      "eval_runtime": 24.9762,
      "eval_samples_per_second": 28.187,
      "eval_steps_per_second": 3.523,
      "step": 50
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2235350146757427e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}