whisper-tiny-javanese-openslr-v2 / trainer_state.json
{
"best_global_step": null,
"best_metric": 0.47698996707825364,
"best_model_checkpoint": "/raid/p-storage/slp01_565a7357/bagas-fine-tune-whisper/whisper-tiny-javanese-openslr-v2/checkpoint-500",
"epoch": 4.32152117545376,
"eval_steps": 500,
"global_step": 2500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.17286084701815038,
"grad_norm": 5.079843521118164,
"learning_rate": 7.840000000000001e-06,
"loss": 2.5618,
"step": 100
},
{
"epoch": 0.34572169403630076,
"grad_norm": 4.103299617767334,
"learning_rate": 1.584e-05,
"loss": 1.021,
"step": 200
},
{
"epoch": 0.5185825410544511,
"grad_norm": 3.8726439476013184,
"learning_rate": 1.9573333333333335e-05,
"loss": 0.7363,
"step": 300
},
{
"epoch": 0.6914433880726015,
"grad_norm": 3.9386720657348633,
"learning_rate": 1.8684444444444446e-05,
"loss": 0.6066,
"step": 400
},
{
"epoch": 0.8643042350907519,
"grad_norm": 3.273592472076416,
"learning_rate": 1.7795555555555557e-05,
"loss": 0.528,
"step": 500
},
{
"epoch": 0.8643042350907519,
"eval_loss": 0.4466875493526459,
"eval_runtime": 5073.2528,
"eval_samples_per_second": 3.647,
"eval_steps_per_second": 0.228,
"eval_wer": 0.47698996707825364,
"step": 500
},
{
"epoch": 1.0363007778738116,
"grad_norm": 3.0979788303375244,
"learning_rate": 1.690666666666667e-05,
"loss": 0.4728,
"step": 600
},
{
"epoch": 1.2091616248919619,
"grad_norm": 3.1844944953918457,
"learning_rate": 1.601777777777778e-05,
"loss": 0.4222,
"step": 700
},
{
"epoch": 1.3820224719101124,
"grad_norm": 2.981661319732666,
"learning_rate": 1.5128888888888891e-05,
"loss": 0.404,
"step": 800
},
{
"epoch": 1.5548833189282627,
"grad_norm": 3.146735906600952,
"learning_rate": 1.4240000000000001e-05,
"loss": 0.3867,
"step": 900
},
{
"epoch": 1.727744165946413,
"grad_norm": 2.873340368270874,
"learning_rate": 1.3351111111111112e-05,
"loss": 0.3702,
"step": 1000
},
{
"epoch": 1.727744165946413,
"eval_loss": 0.34238535165786743,
"eval_runtime": 6401.1103,
"eval_samples_per_second": 2.891,
"eval_steps_per_second": 0.181,
"eval_wer": 0.5528429218245372,
"step": 1000
},
{
"epoch": 1.902333621434745,
"grad_norm": 2.7809109687805176,
"learning_rate": 1.2462222222222222e-05,
"loss": 0.3611,
"step": 1100
},
{
"epoch": 2.076058772687986,
"grad_norm": 2.62384295463562,
"learning_rate": 1.1573333333333335e-05,
"loss": 0.3371,
"step": 1200
},
{
"epoch": 2.2489196197061365,
"grad_norm": 2.793154716491699,
"learning_rate": 1.0684444444444446e-05,
"loss": 0.3134,
"step": 1300
},
{
"epoch": 2.421780466724287,
"grad_norm": 2.5597171783447266,
"learning_rate": 9.795555555555556e-06,
"loss": 0.3069,
"step": 1400
},
{
"epoch": 2.5946413137424376,
"grad_norm": 2.6140079498291016,
"learning_rate": 8.906666666666667e-06,
"loss": 0.2988,
"step": 1500
},
{
"epoch": 2.5946413137424376,
"eval_loss": 0.30312925577163696,
"eval_runtime": 2962.721,
"eval_samples_per_second": 6.246,
"eval_steps_per_second": 0.391,
"eval_wer": 0.5552201690660703,
"step": 1500
},
{
"epoch": 2.767502160760588,
"grad_norm": 2.635817050933838,
"learning_rate": 8.017777777777779e-06,
"loss": 0.2948,
"step": 1600
},
{
"epoch": 2.940363007778738,
"grad_norm": 2.918149709701538,
"learning_rate": 7.12888888888889e-06,
"loss": 0.2903,
"step": 1700
},
{
"epoch": 3.1123595505617976,
"grad_norm": 2.2907419204711914,
"learning_rate": 6.24e-06,
"loss": 0.2711,
"step": 1800
},
{
"epoch": 3.2852203975799483,
"grad_norm": 2.640625238418579,
"learning_rate": 5.351111111111112e-06,
"loss": 0.2667,
"step": 1900
},
{
"epoch": 3.4580812445980986,
"grad_norm": 2.5180225372314453,
"learning_rate": 4.462222222222223e-06,
"loss": 0.2607,
"step": 2000
},
{
"epoch": 3.4580812445980986,
"eval_loss": 0.28587546944618225,
"eval_runtime": 2830.646,
"eval_samples_per_second": 6.537,
"eval_steps_per_second": 0.409,
"eval_wer": 0.6484880238350316,
"step": 2000
},
{
"epoch": 3.630942091616249,
"grad_norm": 2.6452524662017822,
"learning_rate": 3.5733333333333336e-06,
"loss": 0.2637,
"step": 2100
},
{
"epoch": 3.8038029386343992,
"grad_norm": 2.3364474773406982,
"learning_rate": 2.6844444444444445e-06,
"loss": 0.2615,
"step": 2200
},
{
"epoch": 3.9766637856525495,
"grad_norm": 2.414165496826172,
"learning_rate": 1.7955555555555556e-06,
"loss": 0.2603,
"step": 2300
},
{
"epoch": 4.148660328435609,
"grad_norm": 2.3572444915771484,
"learning_rate": 9.066666666666668e-07,
"loss": 0.2483,
"step": 2400
},
{
"epoch": 4.32152117545376,
"grad_norm": 2.6238534450531006,
"learning_rate": 1.777777777777778e-08,
"loss": 0.2481,
"step": 2500
},
{
"epoch": 4.32152117545376,
"eval_loss": 0.2792496979236603,
"eval_runtime": 2806.8606,
"eval_samples_per_second": 6.592,
"eval_steps_per_second": 0.412,
"eval_wer": 0.6471586421539112,
"step": 2500
},
{
"epoch": 4.32152117545376,
"step": 2500,
"total_flos": 1.574545111842816e+19,
"train_loss": 0.17131386260986328,
"train_runtime": 60097.0332,
"train_samples_per_second": 10.649,
"train_steps_per_second": 0.042
}
],
"logging_steps": 100,
"max_steps": 2500,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.574545111842816e+19,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}
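
The log_history above interleaves per-100-step training losses with evaluations every 500 steps; the lowest eval_wer (0.47698996707825364, at step 500) is what best_metric and best_model_checkpoint record. Below is a minimal sketch for inspecting this file with Python's standard json module, assuming it has been saved locally under the hypothetical path trainer_state.json:

    import json

    # Load the state file written by transformers.Trainer at the end of training.
    # "trainer_state.json" is a hypothetical local path to the file shown above.
    with open("trainer_state.json") as f:
        state = json.load(f)

    # Training-loss entries carry a "loss" key; evaluation entries carry "eval_wer".
    train_logs = [e for e in state["log_history"] if "loss" in e]
    eval_logs = [e for e in state["log_history"] if "eval_wer" in e]

    print("best WER:", state["best_metric"])
    print("best checkpoint:", state["best_model_checkpoint"])
    for e in eval_logs:
        print(f"step {e['step']:>5}  eval_loss {e['eval_loss']:.4f}  eval_wer {e['eval_wer']:.4f}")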