{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.03624173235480656,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.007248346470961312,
      "grad_norm": 0.07201674580574036,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": -1.9793262481689453,
      "logits/rejected": -2.5381760597229004,
      "logps/chosen": -0.28126341104507446,
      "logps/rejected": -0.3779803514480591,
      "loss": 7.3904,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.4218950867652893,
      "rewards/margins": 0.14507544040679932,
      "rewards/rejected": -0.5669704675674438,
      "step": 10
    },
    {
      "epoch": 0.014496692941922623,
      "grad_norm": 0.07562297582626343,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": -2.0567643642425537,
      "logits/rejected": -2.4989147186279297,
      "logps/chosen": -0.27690139412879944,
      "logps/rejected": -0.33544114232063293,
      "loss": 7.3756,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.41535210609436035,
      "rewards/margins": 0.08780960738658905,
      "rewards/rejected": -0.5031617283821106,
      "step": 20
    },
    {
      "epoch": 0.021745039412883936,
      "grad_norm": 0.09685570746660233,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": -2.10174298286438,
      "logits/rejected": -2.378197431564331,
      "logps/chosen": -0.26717427372932434,
      "logps/rejected": -0.30565372109413147,
      "loss": 7.451,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.4007614254951477,
      "rewards/margins": 0.05771917849779129,
      "rewards/rejected": -0.4584805369377136,
      "step": 30
    },
    {
      "epoch": 0.028993385883845247,
      "grad_norm": 0.08213861286640167,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": -2.1633317470550537,
      "logits/rejected": -2.387866497039795,
      "logps/chosen": -0.27634260058403015,
      "logps/rejected": -0.37035584449768066,
      "loss": 7.3892,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.4145139157772064,
      "rewards/margins": 0.14101983606815338,
      "rewards/rejected": -0.555533766746521,
      "step": 40
    },
    {
      "epoch": 0.03624173235480656,
      "grad_norm": 0.08846044540405273,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": -2.1402599811553955,
      "logits/rejected": -2.4459526538848877,
      "logps/chosen": -0.2535383999347687,
      "logps/rejected": -0.3090876042842865,
      "loss": 7.5171,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.3803076148033142,
      "rewards/margins": 0.08332376182079315,
      "rewards/rejected": -0.4636313319206238,
      "step": 50
    },
    {
      "epoch": 0.03624173235480656,
      "eval_logits/chosen": -2.1165692806243896,
      "eval_logits/rejected": -2.476428747177124,
      "eval_logps/chosen": -0.2828062176704407,
      "eval_logps/rejected": -0.3432886600494385,
      "eval_loss": 0.9120001792907715,
      "eval_rewards/accuracies": 0.5089285969734192,
      "eval_rewards/chosen": -0.4242093861103058,
      "eval_rewards/margins": 0.09072363376617432,
      "eval_rewards/rejected": -0.5149329900741577,
      "eval_runtime": 30.971,
      "eval_samples_per_second": 28.801,
      "eval_steps_per_second": 3.616,
      "step": 50
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.864510672595845e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}