Training in progress, step 500, checkpoint
{
"best_metric": 9.4224620303757,
"best_model_checkpoint": "kotoba_v2_enc_logs_epoch2_2/checkpoint-500",
"epoch": 0.0017133497358014708,
"eval_steps": 500,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 3.4266994716029415e-05,
"grad_norm": 1.0561553239822388,
"learning_rate": 1e-05,
"loss": 0.2361,
"step": 10
},
{
"epoch": 6.853398943205883e-05,
"grad_norm": 1.1626238822937012,
"learning_rate": 1e-05,
"loss": 0.2265,
"step": 20
},
{
"epoch": 0.00010280098414808825,
"grad_norm": 0.9845689535140991,
"learning_rate": 1e-05,
"loss": 0.2279,
"step": 30
},
{
"epoch": 0.00013706797886411766,
"grad_norm": 1.142356276512146,
"learning_rate": 1e-05,
"loss": 0.2382,
"step": 40
},
{
"epoch": 0.00017133497358014707,
"grad_norm": 1.0053240060806274,
"learning_rate": 1e-05,
"loss": 0.2473,
"step": 50
},
{
"epoch": 0.0002056019682961765,
"grad_norm": 1.1098105907440186,
"learning_rate": 1e-05,
"loss": 0.2438,
"step": 60
},
{
"epoch": 0.0002398689630122059,
"grad_norm": 1.191983699798584,
"learning_rate": 1e-05,
"loss": 0.2293,
"step": 70
},
{
"epoch": 0.0002741359577282353,
"grad_norm": 1.1295104026794434,
"learning_rate": 1e-05,
"loss": 0.2362,
"step": 80
},
{
"epoch": 0.0003084029524442647,
"grad_norm": 1.037972092628479,
"learning_rate": 1e-05,
"loss": 0.2455,
"step": 90
},
{
"epoch": 0.00034266994716029413,
"grad_norm": 1.1975648403167725,
"learning_rate": 1e-05,
"loss": 0.2459,
"step": 100
},
{
"epoch": 0.00037693694187632354,
"grad_norm": 1.0676342248916626,
"learning_rate": 1e-05,
"loss": 0.2271,
"step": 110
},
{
"epoch": 0.000411203936592353,
"grad_norm": 1.0749495029449463,
"learning_rate": 1e-05,
"loss": 0.2417,
"step": 120
},
{
"epoch": 0.0004454709313083824,
"grad_norm": 1.094260811805725,
"learning_rate": 1e-05,
"loss": 0.2354,
"step": 130
},
{
"epoch": 0.0004797379260244118,
"grad_norm": 1.0395853519439697,
"learning_rate": 1e-05,
"loss": 0.2381,
"step": 140
},
{
"epoch": 0.0005140049207404412,
"grad_norm": 1.2008885145187378,
"learning_rate": 1e-05,
"loss": 0.2354,
"step": 150
},
{
"epoch": 0.0005482719154564706,
"grad_norm": 1.0647832155227661,
"learning_rate": 1e-05,
"loss": 0.2321,
"step": 160
},
{
"epoch": 0.0005825389101725,
"grad_norm": 1.327071189880371,
"learning_rate": 1e-05,
"loss": 0.238,
"step": 170
},
{
"epoch": 0.0006168059048885295,
"grad_norm": 1.1184055805206299,
"learning_rate": 1e-05,
"loss": 0.2242,
"step": 180
},
{
"epoch": 0.0006510728996045589,
"grad_norm": 1.2512784004211426,
"learning_rate": 1e-05,
"loss": 0.2437,
"step": 190
},
{
"epoch": 0.0006853398943205883,
"grad_norm": 1.0614465475082397,
"learning_rate": 1e-05,
"loss": 0.2382,
"step": 200
},
{
"epoch": 0.0007196068890366177,
"grad_norm": 1.0607149600982666,
"learning_rate": 1e-05,
"loss": 0.2381,
"step": 210
},
{
"epoch": 0.0007538738837526471,
"grad_norm": 1.0422028303146362,
"learning_rate": 1e-05,
"loss": 0.2294,
"step": 220
},
{
"epoch": 0.0007881408784686765,
"grad_norm": 1.0162984132766724,
"learning_rate": 1e-05,
"loss": 0.2275,
"step": 230
},
{
"epoch": 0.000822407873184706,
"grad_norm": 1.1085543632507324,
"learning_rate": 1e-05,
"loss": 0.2161,
"step": 240
},
{
"epoch": 0.0008566748679007354,
"grad_norm": 1.1854636669158936,
"learning_rate": 1e-05,
"loss": 0.2382,
"step": 250
},
{
"epoch": 0.0008909418626167648,
"grad_norm": 1.40137779712677,
"learning_rate": 1e-05,
"loss": 0.2579,
"step": 260
},
{
"epoch": 0.0009252088573327942,
"grad_norm": 1.0814112424850464,
"learning_rate": 1e-05,
"loss": 0.2612,
"step": 270
},
{
"epoch": 0.0009594758520488236,
"grad_norm": 1.083736538887024,
"learning_rate": 1e-05,
"loss": 0.2711,
"step": 280
},
{
"epoch": 0.000993742846764853,
"grad_norm": 1.0861411094665527,
"learning_rate": 1e-05,
"loss": 0.2642,
"step": 290
},
{
"epoch": 0.0010280098414808825,
"grad_norm": 1.1141265630722046,
"learning_rate": 1e-05,
"loss": 0.2585,
"step": 300
},
{
"epoch": 0.0010622768361969119,
"grad_norm": 1.326241374015808,
"learning_rate": 1e-05,
"loss": 0.2858,
"step": 310
},
{
"epoch": 0.0010965438309129413,
"grad_norm": 1.393750786781311,
"learning_rate": 1e-05,
"loss": 0.2635,
"step": 320
},
{
"epoch": 0.0011308108256289707,
"grad_norm": 1.0851459503173828,
"learning_rate": 1e-05,
"loss": 0.2565,
"step": 330
},
{
"epoch": 0.001165077820345,
"grad_norm": 1.2323757410049438,
"learning_rate": 1e-05,
"loss": 0.2465,
"step": 340
},
{
"epoch": 0.0011993448150610295,
"grad_norm": 1.376953125,
"learning_rate": 1e-05,
"loss": 0.2671,
"step": 350
},
{
"epoch": 0.001233611809777059,
"grad_norm": 1.084592580795288,
"learning_rate": 1e-05,
"loss": 0.2643,
"step": 360
},
{
"epoch": 0.0012678788044930883,
"grad_norm": 1.2907005548477173,
"learning_rate": 1e-05,
"loss": 0.2584,
"step": 370
},
{
"epoch": 0.0013021457992091177,
"grad_norm": 1.0698130130767822,
"learning_rate": 1e-05,
"loss": 0.2526,
"step": 380
},
{
"epoch": 0.0013364127939251471,
"grad_norm": 1.1399807929992676,
"learning_rate": 1e-05,
"loss": 0.2759,
"step": 390
},
{
"epoch": 0.0013706797886411765,
"grad_norm": 1.1480791568756104,
"learning_rate": 1e-05,
"loss": 0.2499,
"step": 400
},
{
"epoch": 0.001404946783357206,
"grad_norm": 1.3095237016677856,
"learning_rate": 1e-05,
"loss": 0.2536,
"step": 410
},
{
"epoch": 0.0014392137780732353,
"grad_norm": 1.068246841430664,
"learning_rate": 1e-05,
"loss": 0.2604,
"step": 420
},
{
"epoch": 0.0014734807727892648,
"grad_norm": 1.2310419082641602,
"learning_rate": 1e-05,
"loss": 0.2632,
"step": 430
},
{
"epoch": 0.0015077477675052942,
"grad_norm": 1.161867380142212,
"learning_rate": 1e-05,
"loss": 0.2584,
"step": 440
},
{
"epoch": 0.0015420147622213236,
"grad_norm": 1.1461217403411865,
"learning_rate": 1e-05,
"loss": 0.2592,
"step": 450
},
{
"epoch": 0.001576281756937353,
"grad_norm": 1.3006030321121216,
"learning_rate": 1e-05,
"loss": 0.2607,
"step": 460
},
{
"epoch": 0.0016105487516533824,
"grad_norm": 1.1223125457763672,
"learning_rate": 1e-05,
"loss": 0.2433,
"step": 470
},
{
"epoch": 0.001644815746369412,
"grad_norm": 1.2909380197525024,
"learning_rate": 1e-05,
"loss": 0.2693,
"step": 480
},
{
"epoch": 0.0016790827410854414,
"grad_norm": 1.2270597219467163,
"learning_rate": 1e-05,
"loss": 0.2661,
"step": 490
},
{
"epoch": 0.0017133497358014708,
"grad_norm": 1.1439770460128784,
"learning_rate": 1e-05,
"loss": 0.2517,
"step": 500
},
{
"epoch": 0.0017133497358014708,
"eval_cer": 13.0358087846181,
"eval_loss": 0.25224336981773376,
"eval_normalized_cer": 9.4224620303757,
"eval_runtime": 227.2174,
"eval_samples_per_second": 2.253,
"eval_steps_per_second": 0.035,
"step": 500
}
],
"logging_steps": 10,
"max_steps": 291826,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.0137951535104e+20,
"train_batch_size": 128,
"trial_name": null,
"trial_params": null
}
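
The JSON above is the Trainer's bookkeeping for the checkpoint at step 500: a loss entry every 10 steps (logging_steps) and one evaluation entry with the CER metrics that set best_metric. As a minimal sketch (not part of the checkpoint itself), the following Python reads this file and summarizes those logs; the path "trainer_state.json" in the working directory is an assumption, so point it at wherever the checkpoint was downloaded.

# Sketch: summarize the training loss and eval metrics stored in trainer_state.json.
import json

with open("trainer_state.json", "r", encoding="utf-8") as f:  # assumed path
    state = json.load(f)

# Training log entries carry a "loss" key; the evaluation entry carries "eval_*" keys.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global_step: {state['global_step']}, best_metric: {state['best_metric']}")
print(f"mean train loss over {len(train_logs)} logged steps: "
      f"{sum(e['loss'] for e in train_logs) / len(train_logs):.4f}")
for e in eval_logs:
    print(f"step {e['step']}: eval_loss={e['eval_loss']:.4f}, "
          f"eval_cer={e['eval_cer']:.2f}, "
          f"eval_normalized_cer={e['eval_normalized_cer']:.2f}")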