{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.985781990521327,
  "eval_steps": 100,
  "global_step": 52,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "grad_norm": 555.6228482194545,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": 135.66180419921875,
      "logits/rejected": 110.02157592773438,
      "logps/chosen": -827.0068359375,
      "logps/rejected": -818.9014892578125,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.19,
      "grad_norm": 676.2488072908993,
      "learning_rate": 4.907293218369498e-07,
      "logits/chosen": 121.71678161621094,
      "logits/rejected": 142.29522705078125,
      "logps/chosen": -812.8253784179688,
      "logps/rejected": -902.03662109375,
      "loss": 0.8916,
      "rewards/accuracies": 0.5208333134651184,
      "rewards/chosen": 1.315455436706543,
      "rewards/margins": 0.254628986120224,
      "rewards/rejected": 1.0608265399932861,
      "step": 10
    },
    {
      "epoch": 0.38,
      "grad_norm": 380.7231206035148,
      "learning_rate": 3.941700805287168e-07,
      "logits/chosen": 132.0984344482422,
      "logits/rejected": 141.592041015625,
      "logps/chosen": -816.7606201171875,
      "logps/rejected": -899.7657470703125,
      "loss": 0.7706,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.9781860113143921,
      "rewards/margins": 1.6455551385879517,
      "rewards/rejected": -2.6237411499023438,
      "step": 20
    },
    {
      "epoch": 0.57,
      "grad_norm": 238.91419418333624,
      "learning_rate": 2.3293939665883228e-07,
      "logits/chosen": 128.60267639160156,
      "logits/rejected": 132.55734252929688,
      "logps/chosen": -847.0579833984375,
      "logps/rejected": -911.5173950195312,
      "loss": 0.6789,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.37434953451156616,
      "rewards/margins": 2.5063283443450928,
      "rewards/rejected": -2.8806777000427246,
      "step": 30
    },
    {
      "epoch": 0.76,
      "grad_norm": 377.40289333938733,
      "learning_rate": 7.936171419533652e-08,
      "logits/chosen": 130.8309783935547,
      "logits/rejected": 126.89227294921875,
      "logps/chosen": -860.4659423828125,
      "logps/rejected": -873.3118896484375,
      "loss": 0.6884,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.2398359328508377,
      "rewards/margins": 1.8883726596832275,
      "rewards/rejected": -2.128208637237549,
      "step": 40
    },
    {
      "epoch": 0.95,
      "grad_norm": 313.23696106113914,
      "learning_rate": 2.328513490917311e-09,
      "logits/chosen": 137.02291870117188,
      "logits/rejected": 137.97555541992188,
      "logps/chosen": -872.61572265625,
      "logps/rejected": -902.748046875,
      "loss": 0.6086,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.32394886016845703,
      "rewards/margins": 1.9923664331436157,
      "rewards/rejected": -2.3163154125213623,
      "step": 50
    },
    {
      "epoch": 0.99,
      "step": 52,
      "total_flos": 0.0,
      "train_loss": 0.7234916411913358,
      "train_runtime": 575.1359,
      "train_samples_per_second": 11.736,
      "train_steps_per_second": 0.09
    }
  ],
  "logging_steps": 10,
  "max_steps": 52,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}