{
  "best_metric": 0.6522520780563354,
  "best_model_checkpoint": "bert_uncased_L-2_H-256_A-4_rte/checkpoint-80",
  "epoch": 13.0,
  "eval_steps": 500,
  "global_step": 130,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 0.49917563796043396,
      "learning_rate": 4.9e-05,
      "loss": 0.6981,
      "step": 10
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.5740072202166066,
      "eval_loss": 0.6831777691841125,
      "eval_runtime": 0.0995,
      "eval_samples_per_second": 2784.168,
      "eval_steps_per_second": 20.102,
      "step": 10
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.5291090607643127,
      "learning_rate": 4.8e-05,
      "loss": 0.6877,
      "step": 20
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.5740072202166066,
      "eval_loss": 0.6789401173591614,
      "eval_runtime": 0.1064,
      "eval_samples_per_second": 2603.336,
      "eval_steps_per_second": 18.797,
      "step": 20
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.6718031167984009,
      "learning_rate": 4.7e-05,
      "loss": 0.6794,
      "step": 30
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.5812274368231047,
      "eval_loss": 0.6745755076408386,
      "eval_runtime": 0.101,
      "eval_samples_per_second": 2741.744,
      "eval_steps_per_second": 19.796,
      "step": 30
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.6451788544654846,
      "learning_rate": 4.600000000000001e-05,
      "loss": 0.6685,
      "step": 40
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.5740072202166066,
      "eval_loss": 0.6702955365180969,
      "eval_runtime": 0.1088,
      "eval_samples_per_second": 2547.007,
      "eval_steps_per_second": 18.39,
      "step": 40
    },
    {
      "epoch": 5.0,
      "grad_norm": 1.8607382774353027,
      "learning_rate": 4.5e-05,
      "loss": 0.6592,
      "step": 50
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.5848375451263538,
      "eval_loss": 0.6674469709396362,
      "eval_runtime": 0.1001,
      "eval_samples_per_second": 2767.996,
      "eval_steps_per_second": 19.986,
      "step": 50
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.7673319578170776,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 0.6447,
      "step": 60
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.6028880866425993,
      "eval_loss": 0.6636605858802795,
      "eval_runtime": 0.1128,
      "eval_samples_per_second": 2454.592,
      "eval_steps_per_second": 17.723,
      "step": 60
    },
    {
      "epoch": 7.0,
      "grad_norm": 1.0504330396652222,
      "learning_rate": 4.3e-05,
      "loss": 0.6238,
      "step": 70
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.5956678700361011,
      "eval_loss": 0.6564615368843079,
      "eval_runtime": 0.1002,
      "eval_samples_per_second": 2764.256,
      "eval_steps_per_second": 19.959,
      "step": 70
    },
    {
      "epoch": 8.0,
      "grad_norm": 1.1754580736160278,
      "learning_rate": 4.2e-05,
      "loss": 0.6077,
      "step": 80
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.6028880866425993,
      "eval_loss": 0.6522520780563354,
      "eval_runtime": 0.1019,
      "eval_samples_per_second": 2719.558,
      "eval_steps_per_second": 19.636,
      "step": 80
    },
    {
      "epoch": 9.0,
      "grad_norm": 1.2603336572647095,
      "learning_rate": 4.1e-05,
      "loss": 0.5805,
      "step": 90
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.5884476534296029,
      "eval_loss": 0.655805766582489,
      "eval_runtime": 0.1018,
      "eval_samples_per_second": 2721.156,
      "eval_steps_per_second": 19.647,
      "step": 90
    },
    {
      "epoch": 10.0,
      "grad_norm": 1.229230523109436,
      "learning_rate": 4e-05,
      "loss": 0.5502,
      "step": 100
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.5848375451263538,
      "eval_loss": 0.6610111594200134,
      "eval_runtime": 0.0993,
      "eval_samples_per_second": 2790.293,
      "eval_steps_per_second": 20.147,
      "step": 100
    },
    {
      "epoch": 11.0,
      "grad_norm": 1.8095612525939941,
      "learning_rate": 3.9000000000000006e-05,
      "loss": 0.5119,
      "step": 110
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.6064981949458483,
      "eval_loss": 0.6631617546081543,
      "eval_runtime": 0.0998,
      "eval_samples_per_second": 2775.522,
      "eval_steps_per_second": 20.04,
      "step": 110
    },
    {
      "epoch": 12.0,
      "grad_norm": 1.673175573348999,
      "learning_rate": 3.8e-05,
      "loss": 0.4778,
      "step": 120
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.6028880866425993,
      "eval_loss": 0.678740918636322,
      "eval_runtime": 0.0996,
      "eval_samples_per_second": 2779.991,
      "eval_steps_per_second": 20.072,
      "step": 120
    },
    {
      "epoch": 13.0,
      "grad_norm": 1.942636251449585,
      "learning_rate": 3.7e-05,
      "loss": 0.4415,
      "step": 130
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.5956678700361011,
      "eval_loss": 0.7026975154876709,
      "eval_runtime": 0.1076,
      "eval_samples_per_second": 2573.991,
      "eval_steps_per_second": 18.585,
      "step": 130
    },
    {
      "epoch": 13.0,
      "step": 130,
      "total_flos": 81856452188160.0,
      "train_loss": 0.602387758401724,
      "train_runtime": 18.3661,
      "train_samples_per_second": 6778.809,
      "train_steps_per_second": 27.224
    }
  ],
  "logging_steps": 1,
  "max_steps": 500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 5
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 81856452188160.0,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}
|