{
  "best_metric": 0.6038,
  "best_model_checkpoint": "save_model/deberta-v3-base-amazon-reviews-multi/checkpoint-18750",
  "epoch": 5.0,
  "global_step": 31250,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08,
      "learning_rate": 4.92e-05,
      "loss": 1.0637,
      "step": 500
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.8400000000000004e-05,
      "loss": 0.9832,
      "step": 1000
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.76e-05,
      "loss": 0.9515,
      "step": 1500
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.6800000000000006e-05,
      "loss": 0.9413,
      "step": 2000
    },
    {
      "epoch": 0.4,
      "learning_rate": 4.600000000000001e-05,
      "loss": 0.9464,
      "step": 2500
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.52e-05,
      "loss": 0.9408,
      "step": 3000
    },
    {
      "epoch": 0.56,
      "learning_rate": 4.44e-05,
      "loss": 0.9262,
      "step": 3500
    },
    {
      "epoch": 0.64,
      "learning_rate": 4.36e-05,
      "loss": 0.9273,
      "step": 4000
    },
    {
      "epoch": 0.72,
      "learning_rate": 4.2800000000000004e-05,
      "loss": 0.9227,
      "step": 4500
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.2e-05,
      "loss": 0.9129,
      "step": 5000
    },
    {
      "epoch": 0.88,
      "learning_rate": 4.12e-05,
      "loss": 0.9246,
      "step": 5500
    },
    {
      "epoch": 0.96,
      "learning_rate": 4.0400000000000006e-05,
      "loss": 0.9134,
      "step": 6000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6012,
      "eval_loss": 0.913094162940979,
      "eval_runtime": 11.6784,
      "eval_samples_per_second": 428.141,
      "eval_steps_per_second": 13.444,
      "step": 6250
    },
    {
      "epoch": 1.04,
      "learning_rate": 3.960000000000001e-05,
      "loss": 0.8756,
      "step": 6500
    },
    {
      "epoch": 1.12,
      "learning_rate": 3.88e-05,
      "loss": 0.842,
      "step": 7000
    },
    {
      "epoch": 1.2,
      "learning_rate": 3.8e-05,
      "loss": 0.8358,
      "step": 7500
    },
    {
      "epoch": 1.28,
      "learning_rate": 3.72e-05,
      "loss": 0.8372,
      "step": 8000
    },
    {
      "epoch": 1.36,
      "learning_rate": 3.6400000000000004e-05,
      "loss": 0.8443,
      "step": 8500
    },
    {
      "epoch": 1.44,
      "learning_rate": 3.56e-05,
      "loss": 0.8442,
      "step": 9000
    },
    {
      "epoch": 1.52,
      "learning_rate": 3.48e-05,
      "loss": 0.8437,
      "step": 9500
    },
    {
      "epoch": 1.6,
      "learning_rate": 3.4000000000000007e-05,
      "loss": 0.8403,
      "step": 10000
    },
    {
      "epoch": 1.68,
      "learning_rate": 3.32e-05,
      "loss": 0.8521,
      "step": 10500
    },
    {
      "epoch": 1.76,
      "learning_rate": 3.24e-05,
      "loss": 0.8429,
      "step": 11000
    },
    {
      "epoch": 1.84,
      "learning_rate": 3.16e-05,
      "loss": 0.8416,
      "step": 11500
    },
    {
      "epoch": 1.92,
      "learning_rate": 3.08e-05,
      "loss": 0.8467,
      "step": 12000
    },
    {
      "epoch": 2.0,
      "learning_rate": 3e-05,
      "loss": 0.8411,
      "step": 12500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.6008,
      "eval_loss": 0.9258156418800354,
      "eval_runtime": 11.6504,
      "eval_samples_per_second": 429.17,
      "eval_steps_per_second": 13.476,
      "step": 12500
    },
    {
      "epoch": 2.08,
      "learning_rate": 2.9199999999999998e-05,
      "loss": 0.7412,
      "step": 13000
    },
    {
      "epoch": 2.16,
      "learning_rate": 2.84e-05,
      "loss": 0.745,
      "step": 13500
    },
    {
      "epoch": 2.24,
      "learning_rate": 2.7600000000000003e-05,
      "loss": 0.7507,
      "step": 14000
    },
    {
      "epoch": 2.32,
      "learning_rate": 2.6800000000000004e-05,
      "loss": 0.7455,
      "step": 14500
    },
    {
      "epoch": 2.4,
      "learning_rate": 2.6000000000000002e-05,
      "loss": 0.7532,
      "step": 15000
    },
    {
      "epoch": 2.48,
      "learning_rate": 2.5200000000000003e-05,
      "loss": 0.7484,
      "step": 15500
    },
    {
      "epoch": 2.56,
      "learning_rate": 2.44e-05,
      "loss": 0.7434,
      "step": 16000
    },
    {
      "epoch": 2.64,
      "learning_rate": 2.36e-05,
      "loss": 0.7497,
      "step": 16500
    },
    {
      "epoch": 2.72,
      "learning_rate": 2.2800000000000002e-05,
      "loss": 0.7409,
      "step": 17000
    },
    {
      "epoch": 2.8,
      "learning_rate": 2.2000000000000003e-05,
      "loss": 0.7404,
      "step": 17500
    },
    {
      "epoch": 2.88,
      "learning_rate": 2.12e-05,
      "loss": 0.7437,
      "step": 18000
    },
    {
      "epoch": 2.96,
      "learning_rate": 2.04e-05,
      "loss": 0.7503,
      "step": 18500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.6038,
      "eval_loss": 0.9382744431495667,
      "eval_runtime": 11.6538,
      "eval_samples_per_second": 429.045,
      "eval_steps_per_second": 13.472,
      "step": 18750
    },
    {
      "epoch": 3.04,
      "learning_rate": 1.9600000000000002e-05,
      "loss": 0.6785,
      "step": 19000
    },
    {
      "epoch": 3.12,
      "learning_rate": 1.88e-05,
      "loss": 0.6265,
      "step": 19500
    },
    {
      "epoch": 3.2,
      "learning_rate": 1.8e-05,
      "loss": 0.628,
      "step": 20000
    },
    {
      "epoch": 3.28,
      "learning_rate": 1.7199999999999998e-05,
      "loss": 0.6207,
      "step": 20500
    },
    {
      "epoch": 3.36,
      "learning_rate": 1.6400000000000002e-05,
      "loss": 0.6261,
      "step": 21000
    },
    {
      "epoch": 3.44,
      "learning_rate": 1.56e-05,
      "loss": 0.6362,
      "step": 21500
    },
    {
      "epoch": 3.52,
      "learning_rate": 1.48e-05,
      "loss": 0.6292,
      "step": 22000
    },
    {
      "epoch": 3.6,
      "learning_rate": 1.4000000000000001e-05,
      "loss": 0.6117,
      "step": 22500
    },
    {
      "epoch": 3.68,
      "learning_rate": 1.32e-05,
      "loss": 0.6334,
      "step": 23000
    },
    {
      "epoch": 3.76,
      "learning_rate": 1.24e-05,
      "loss": 0.6245,
      "step": 23500
    },
    {
      "epoch": 3.84,
      "learning_rate": 1.16e-05,
      "loss": 0.626,
      "step": 24000
    },
    {
      "epoch": 3.92,
      "learning_rate": 1.08e-05,
      "loss": 0.6302,
      "step": 24500
    },
    {
      "epoch": 4.0,
      "learning_rate": 1e-05,
      "loss": 0.621,
      "step": 25000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6026,
      "eval_loss": 1.052680492401123,
      "eval_runtime": 11.6694,
      "eval_samples_per_second": 428.472,
      "eval_steps_per_second": 13.454,
      "step": 25000
    },
    {
      "epoch": 4.08,
      "learning_rate": 9.2e-06,
      "loss": 0.5245,
      "step": 25500
    },
    {
      "epoch": 4.16,
      "learning_rate": 8.400000000000001e-06,
      "loss": 0.51,
      "step": 26000
    },
    {
      "epoch": 4.24,
      "learning_rate": 7.6e-06,
      "loss": 0.5238,
      "step": 26500
    },
    {
      "epoch": 4.32,
      "learning_rate": 6.800000000000001e-06,
      "loss": 0.5292,
      "step": 27000
    },
    {
      "epoch": 4.4,
      "learning_rate": 6e-06,
      "loss": 0.5049,
      "step": 27500
    },
    {
      "epoch": 4.48,
      "learning_rate": 5.2e-06,
      "loss": 0.5124,
      "step": 28000
    },
    {
      "epoch": 4.56,
      "learning_rate": 4.4e-06,
      "loss": 0.5096,
      "step": 28500
    },
    {
      "epoch": 4.64,
      "learning_rate": 3.6e-06,
      "loss": 0.5142,
      "step": 29000
    },
    {
      "epoch": 4.72,
      "learning_rate": 2.8000000000000003e-06,
      "loss": 0.5043,
      "step": 29500
    },
    {
      "epoch": 4.8,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.505,
      "step": 30000
    },
    {
      "epoch": 4.88,
      "learning_rate": 1.2000000000000002e-06,
      "loss": 0.4981,
      "step": 30500
    },
    {
      "epoch": 4.96,
      "learning_rate": 4.0000000000000003e-07,
      "loss": 0.5088,
      "step": 31000
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.5994,
      "eval_loss": 1.1826133728027344,
      "eval_runtime": 11.662,
      "eval_samples_per_second": 428.744,
      "eval_steps_per_second": 13.463,
      "step": 31250
    },
    {
      "epoch": 5.0,
      "step": 31250,
      "total_flos": 6.5780715264e+16,
      "train_loss": 0.7340158759765625,
      "train_runtime": 7300.3061,
      "train_samples_per_second": 136.981,
      "train_steps_per_second": 4.281
    }
  ],
  "max_steps": 31250,
  "num_train_epochs": 5,
  "total_flos": 6.5780715264e+16,
  "trial_name": null,
  "trial_params": null
}