|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 42,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 1e-07,
      "logits/chosen": -2.961165428161621,
      "logits/rejected": -2.9065260887145996,
      "logps/chosen": -301.09039306640625,
      "logps/pi_response": -154.45700073242188,
      "logps/ref_response": -154.45700073242188,
      "logps/rejected": -312.41326904296875,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.778071225970339e-07,
      "logits/chosen": -2.816866159439087,
      "logits/rejected": -2.6957647800445557,
      "logps/chosen": -267.56488037109375,
      "logps/pi_response": -182.86256408691406,
      "logps/ref_response": -182.52423095703125,
      "logps/rejected": -226.00772094726562,
      "loss": 0.6925,
      "rewards/accuracies": 0.4444444477558136,
      "rewards/chosen": -0.0020490202587097883,
      "rewards/margins": 0.002598464023321867,
      "rewards/rejected": -0.004647484514862299,
      "step": 10
    },
    {
      "epoch": 0.48,
      "learning_rate": 3.2320569281913754e-07,
      "logits/chosen": -2.7879672050476074,
      "logits/rejected": -2.814840078353882,
      "logps/chosen": -255.08975219726562,
      "logps/pi_response": -235.3483428955078,
      "logps/ref_response": -233.54464721679688,
      "logps/rejected": -263.6041564941406,
      "loss": 0.6868,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.014762332662940025,
      "rewards/margins": 0.012134673073887825,
      "rewards/rejected": -0.026897007599473,
      "step": 20
    },
    {
      "epoch": 0.71,
      "learning_rate": 1.189231791106921e-07,
      "logits/chosen": -2.702427387237549,
      "logits/rejected": -2.7164032459259033,
      "logps/chosen": -194.59146118164062,
      "logps/pi_response": -153.37057495117188,
      "logps/ref_response": -152.309814453125,
      "logps/rejected": -225.66796875,
      "loss": 0.6759,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.0013326064217835665,
      "rewards/margins": 0.022419044747948647,
      "rewards/rejected": -0.02108643762767315,
      "step": 30
    },
    {
      "epoch": 0.95,
      "learning_rate": 3.5960224130728858e-09,
      "logits/chosen": -2.7800514698028564,
      "logits/rejected": -2.6079983711242676,
      "logps/chosen": -291.4418029785156,
      "logps/pi_response": -189.13177490234375,
      "logps/ref_response": -187.91622924804688,
      "logps/rejected": -244.4289093017578,
      "loss": 0.6732,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.02857491932809353,
      "rewards/margins": 0.013249958865344524,
      "rewards/rejected": -0.041824884712696075,
      "step": 40
    },
    {
      "epoch": 1.0,
      "step": 42,
      "total_flos": 0.0,
      "train_loss": 0.6818081367583502,
      "train_runtime": 1329.5916,
      "train_samples_per_second": 1.128,
      "train_steps_per_second": 0.032
    }
  ],
  "logging_steps": 10,
  "max_steps": 42,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|