bert-labr-unbalanced / trainer_state.json
Fine-tuned BERT sequence classification model for the LABR dataset
{
  "best_metric": 0.20763197541236877,
  "best_model_checkpoint": "BERT-LABR-unbalanced/checkpoint-1000",
  "epoch": 0.9784735812133072,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.49,
      "learning_rate": 8.369210697977822e-06,
      "loss": 0.2714,
      "step": 500
    },
    {
      "epoch": 0.49,
      "eval_accuracy": 0.908311910882605,
      "eval_f1": 0.9456024402643619,
      "eval_loss": 0.24122537672519684,
      "eval_precision": 0.9419765591086673,
      "eval_recall": 0.9492563429571304,
      "eval_runtime": 109.0386,
      "eval_samples_per_second": 74.918,
      "eval_steps_per_second": 2.348,
      "step": 500
    },
    {
      "epoch": 0.98,
      "learning_rate": 6.738421395955643e-06,
      "loss": 0.2258,
      "step": 1000
    },
    {
      "epoch": 0.98,
      "eval_accuracy": 0.9215326233321092,
      "eval_f1": 0.9538483692130463,
      "eval_loss": 0.20763197541236877,
      "eval_precision": 0.9421134973687953,
      "eval_recall": 0.9658792650918635,
      "eval_runtime": 108.4256,
      "eval_samples_per_second": 75.342,
      "eval_steps_per_second": 2.361,
      "step": 1000
    }
  ],
  "max_steps": 3066,
  "num_train_epochs": 3,
  "total_flos": 3727725453312000.0,
  "trial_name": null,
  "trial_params": null
}
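
This trainer_state.json is the checkpoint state written by the Hugging Face Transformers Trainer during fine-tuning: "log_history" interleaves training entries (carrying "loss" and "learning_rate") with evaluation entries (carrying "eval_*" keys), and "best_metric" here matches the eval_loss of checkpoint-1000. The following is a minimal sketch of how such a file can be inspected after downloading it; the local filename "trainer_state.json" is an assumption for illustration, not part of the repository.

import json

# Minimal sketch: read a Trainer state file like the one above and report
# the best checkpoint plus the evaluation metrics logged at each eval step.
# The local path "trainer_state.json" is assumed for this example.
with open("trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

print("best checkpoint:", state["best_model_checkpoint"])
print("best metric:", state["best_metric"])  # equals eval_loss at step 1000 above

# Evaluation entries are those containing "eval_loss"; training entries carry "loss".
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(
            f"step {entry['step']}: "
            f"accuracy={entry['eval_accuracy']:.4f}, "
            f"f1={entry['eval_f1']:.4f}, "
            f"eval_loss={entry['eval_loss']:.4f}"
        )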