BERT-WMM / run-2 / checkpoint-801 / trainer_state.json
HikasaHana · Training in progress, epoch 3 · f524f11 · verified
{
  "best_metric": 0.5817901492118835,
  "best_model_checkpoint": "BERT-WMM/run-2/checkpoint-534",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 801,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7563380281690141,
      "eval_loss": 0.6019130349159241,
      "eval_runtime": 2.4784,
      "eval_samples_per_second": 859.428,
      "eval_steps_per_second": 54.067,
      "step": 267
    },
    {
      "epoch": 1.87,
      "grad_norm": 9.674468994140625,
      "learning_rate": 5.7987153604787005e-06,
      "loss": 0.6061,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7619718309859155,
      "eval_loss": 0.5817901492118835,
      "eval_runtime": 2.5361,
      "eval_samples_per_second": 839.87,
      "eval_steps_per_second": 52.837,
      "step": 534
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.7582159624413145,
      "eval_loss": 0.6465383768081665,
      "eval_runtime": 2.4952,
      "eval_samples_per_second": 853.622,
      "eval_steps_per_second": 53.702,
      "step": 801
    }
  ],
  "logging_steps": 500,
  "max_steps": 801,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 349221394826640.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": {
    "per_device_train_batch_size": 32
  }
}
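
For quick inspection, the trainer state can be read back programmatically. The sketch below is a minimal example, assuming the JSON above is saved locally under the relative path shown in the header; it loads the file, prints the per-epoch evaluation records from log_history, and recomputes the best checkpoint, which should agree with the recorded best_model_checkpoint (checkpoint-534, eval_loss ≈ 0.582).

# Minimal sketch: assumes trainer_state.json is available locally at this path.
import json

with open("BERT-WMM/run-2/checkpoint-801/trainer_state.json") as f:
    state = json.load(f)

# Keep only the evaluation records from log_history (the ones carrying eval_loss);
# the step-500 entry is a training log (loss / grad_norm / learning_rate) and is skipped.
eval_records = [r for r in state["log_history"] if "eval_loss" in r]

for r in eval_records:
    print(f"epoch {r['epoch']:.1f}  step {r['step']:>4}  "
          f"eval_loss {r['eval_loss']:.4f}  eval_accuracy {r['eval_accuracy']:.4f}")

# Recompute the best checkpoint by lowest eval_loss and compare with the recorded fields.
best = min(eval_records, key=lambda r: r["eval_loss"])
print("best step:", best["step"], "eval_loss:", best["eval_loss"])
print("recorded best_model_checkpoint:", state["best_model_checkpoint"])

Because is_hyper_param_search is true, trial_params records the hyperparameter drawn for this trial (per_device_train_batch_size = 32), and best_metric / best_model_checkpoint track the lowest eval_loss seen so far, which here is the epoch-2 checkpoint at step 534.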