zephyr-gemma-rpo / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.971563981042654,
"eval_steps": 50,
"global_step": 104,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.018957345971563982,
"eta": 0.004999999422580004,
"grad_norm": 188.9079473976178,
"learning_rate": 4.545454545454545e-08,
"logits/chosen": 204.35707092285156,
"logits/rejected": 182.54800415039062,
"logps/chosen": -443.4819030761719,
"logps/rejected": -434.7948303222656,
"loss": 0.786,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.1895734597156398,
"eta": 0.004999998956918716,
"grad_norm": 122.11894542304172,
"learning_rate": 4.545454545454545e-07,
"logits/chosen": 175.2365264892578,
"logits/rejected": 185.066162109375,
"logps/chosen": -384.705078125,
"logps/rejected": -446.1976013183594,
"loss": 0.8045,
"rewards/accuracies": 0.4166666567325592,
"rewards/chosen": 0.06007244065403938,
"rewards/margins": -0.035397082567214966,
"rewards/rejected": 0.09546952694654465,
"step": 10
},
{
"epoch": 0.3791469194312796,
"eta": 0.004999999888241291,
"grad_norm": 139.6567004586221,
"learning_rate": 4.885348141000122e-07,
"logits/chosen": 176.22576904296875,
"logits/rejected": 177.17520141601562,
"logps/chosen": -368.12298583984375,
"logps/rejected": -404.42626953125,
"loss": 0.7339,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": 0.8393497467041016,
"rewards/margins": 0.46265918016433716,
"rewards/rejected": 0.376690536737442,
"step": 20
},
{
"epoch": 0.5687203791469194,
"eta": 0.004999999888241291,
"grad_norm": 117.96859521631961,
"learning_rate": 4.5025027361734613e-07,
"logits/chosen": 169.5486602783203,
"logits/rejected": 178.0144500732422,
"logps/chosen": -358.923095703125,
"logps/rejected": -427.748291015625,
"loss": 0.6881,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.8779987096786499,
"rewards/margins": 0.9514387249946594,
"rewards/rejected": -1.829437494277954,
"step": 30
},
{
"epoch": 0.7582938388625592,
"eta": 0.004999999888241291,
"grad_norm": 93.43413782788734,
"learning_rate": 3.893311157806091e-07,
"logits/chosen": 173.6021728515625,
"logits/rejected": 164.51864624023438,
"logps/chosen": -390.3794860839844,
"logps/rejected": -413.7120666503906,
"loss": 0.6527,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": -0.9942947626113892,
"rewards/margins": 0.9089801907539368,
"rewards/rejected": -1.9032748937606812,
"step": 40
},
{
"epoch": 0.9478672985781991,
"eta": 0.004999999888241291,
"grad_norm": 100.08364463289377,
"learning_rate": 3.126631330646801e-07,
"logits/chosen": 178.54013061523438,
"logits/rejected": 175.74232482910156,
"logps/chosen": -410.1822814941406,
"logps/rejected": -440.4054260253906,
"loss": 0.5915,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.44992417097091675,
"rewards/margins": 1.3113642930984497,
"rewards/rejected": -1.7612884044647217,
"step": 50
},
{
"epoch": 0.9478672985781991,
"eval_eta": 0.004999999888241291,
"eval_logits/chosen": 174.03684997558594,
"eval_logits/rejected": 171.63697814941406,
"eval_logps/chosen": -408.2999572753906,
"eval_logps/rejected": -458.15985107421875,
"eval_loss": 0.5744712352752686,
"eval_rewards/accuracies": 0.7021276354789734,
"eval_rewards/chosen": -0.721230685710907,
"eval_rewards/margins": 1.1333802938461304,
"eval_rewards/rejected": -1.8546110391616821,
"eval_runtime": 444.1021,
"eval_samples_per_second": 1.689,
"eval_steps_per_second": 0.212,
"step": 50
},
{
"epoch": 1.1374407582938388,
"eta": 0.004999999888241291,
"grad_norm": 45.26799050209758,
"learning_rate": 2.2891223348923882e-07,
"logits/chosen": 171.12948608398438,
"logits/rejected": 174.81674194335938,
"logps/chosen": -353.70318603515625,
"logps/rejected": -442.4922790527344,
"loss": 0.4059,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": -0.18334674835205078,
"rewards/margins": 2.376243829727173,
"rewards/rejected": -2.5595908164978027,
"step": 60
},
{
"epoch": 1.3270142180094786,
"eta": 0.004999999888241291,
"grad_norm": 47.78511870505548,
"learning_rate": 1.4754491880085317e-07,
"logits/chosen": 162.48245239257812,
"logits/rejected": 170.5321044921875,
"logps/chosen": -348.3051452636719,
"logps/rejected": -411.1819763183594,
"loss": 0.2932,
"rewards/accuracies": 0.96875,
"rewards/chosen": 0.4100046753883362,
"rewards/margins": 2.892571449279785,
"rewards/rejected": -2.4825665950775146,
"step": 70
},
{
"epoch": 1.5165876777251186,
"eta": 0.004999999888241291,
"grad_norm": 49.92244645169992,
"learning_rate": 7.775827023107834e-08,
"logits/chosen": 158.89816284179688,
"logits/rejected": 175.38070678710938,
"logps/chosen": -390.493408203125,
"logps/rejected": -479.05596923828125,
"loss": 0.2611,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 0.07505069673061371,
"rewards/margins": 3.036956310272217,
"rewards/rejected": -2.9619057178497314,
"step": 80
},
{
"epoch": 1.7061611374407581,
"eta": 0.004999999888241291,
"grad_norm": 48.71805868934349,
"learning_rate": 2.7440387297912122e-08,
"logits/chosen": 162.8424530029297,
"logits/rejected": 171.51806640625,
"logps/chosen": -368.1020812988281,
"logps/rejected": -489.2860412597656,
"loss": 0.2467,
"rewards/accuracies": 0.956250011920929,
"rewards/chosen": 0.014399850741028786,
"rewards/margins": 3.4118752479553223,
"rewards/rejected": -3.397475481033325,
"step": 90
},
{
"epoch": 1.8957345971563981,
"eta": 0.004999999888241291,
"grad_norm": 36.80728075073422,
"learning_rate": 2.27878296044029e-09,
"logits/chosen": 163.50262451171875,
"logits/rejected": 165.15457153320312,
"logps/chosen": -376.7969970703125,
"logps/rejected": -453.5335998535156,
"loss": 0.2599,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 0.01423326600342989,
"rewards/margins": 3.457508087158203,
"rewards/rejected": -3.443274974822998,
"step": 100
},
{
"epoch": 1.8957345971563981,
"eval_eta": 0.004999999888241291,
"eval_logits/chosen": 163.3166046142578,
"eval_logits/rejected": 161.63723754882812,
"eval_logps/chosen": -408.9965515136719,
"eval_logps/rejected": -464.4193420410156,
"eval_loss": 0.5899427533149719,
"eval_rewards/accuracies": 0.7234042286872864,
"eval_rewards/chosen": -0.7560604810714722,
"eval_rewards/margins": 1.4115227460861206,
"eval_rewards/rejected": -2.167583465576172,
"eval_runtime": 445.1494,
"eval_samples_per_second": 1.685,
"eval_steps_per_second": 0.211,
"step": 100
},
{
"epoch": 1.971563981042654,
"step": 104,
"total_flos": 0.0,
"train_loss": 0.48392796287169826,
"train_runtime": 15946.9173,
"train_samples_per_second": 0.847,
"train_steps_per_second": 0.007
}
],
"logging_steps": 10,
"max_steps": 104,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 45,
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
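
The log above can also be inspected programmatically. Below is a minimal sketch, assuming the file has been downloaded locally as trainer_state.json; it reads log_history with the standard-library json module, splits the training entries (those with a "loss" key) from the evaluation entries (those with an "eval_loss" key), and prints the loss, reward margin, and accuracy per logged step. The field names come from the JSON above; the script itself is illustrative and not part of the checkpoint.

import json

# Path is an assumption; point it at wherever trainer_state.json was saved.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training log entries carry "loss"; evaluation entries carry "eval_loss".
train_rows = [e for e in state["log_history"] if "loss" in e]
eval_rows = [e for e in state["log_history"] if "eval_loss" in e]

print("step  loss    reward_margin  accuracy")
for row in train_rows:
    print(f'{row["step"]:>4}  {row["loss"]:.4f}  '
          f'{row.get("rewards/margins", float("nan")):>13.4f}  '
          f'{row.get("rewards/accuracies", float("nan")):.3f}')

print()
print("eval_step  eval_loss  eval_reward_margin")
for row in eval_rows:
    print(f'{row["step"]:>9}  {row["eval_loss"]:.4f}  '
          f'{row["eval_rewards/margins"]:>18.4f}')

For this file the script would list the ten training rows (steps 1 through 100) plus the two evaluation rows at steps 50 and 100; the final summary entry at step 104 is skipped because it reports "train_loss" rather than "loss".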