{
  "best_metric": 0.85,
  "best_model_checkpoint": "vit-base-patch16-224-dmae-va-U5-42B/checkpoint-209",
  "epoch": 37.935483870967744,
  "eval_steps": 500,
  "global_step": 294,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.9,
      "eval_accuracy": 0.4666666666666667,
      "eval_loss": 1.3100930452346802,
      "eval_runtime": 2.1723,
      "eval_samples_per_second": 27.62,
      "eval_steps_per_second": 0.921,
      "step": 7
    },
    {
      "epoch": 1.29,
      "grad_norm": 2.6547346115112305,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 1.408,
      "step": 10
    },
    {
      "epoch": 1.94,
      "eval_accuracy": 0.48333333333333334,
      "eval_loss": 1.1884137392044067,
      "eval_runtime": 1.6114,
      "eval_samples_per_second": 37.236,
      "eval_steps_per_second": 1.241,
      "step": 15
    },
    {
      "epoch": 2.58,
      "grad_norm": 2.5833563804626465,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 1.1286,
      "step": 20
    },
    {
      "epoch": 2.97,
      "eval_accuracy": 0.5166666666666667,
      "eval_loss": 0.9475716352462769,
      "eval_runtime": 1.6132,
      "eval_samples_per_second": 37.192,
      "eval_steps_per_second": 1.24,
      "step": 23
    },
    {
      "epoch": 3.87,
      "grad_norm": 2.2686269283294678,
      "learning_rate": 5e-05,
      "loss": 0.7589,
      "step": 30
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.75,
      "eval_loss": 0.7636658549308777,
      "eval_runtime": 1.6545,
      "eval_samples_per_second": 36.264,
      "eval_steps_per_second": 1.209,
      "step": 31
    },
    {
      "epoch": 4.9,
      "eval_accuracy": 0.6833333333333333,
      "eval_loss": 0.7185887694358826,
      "eval_runtime": 1.6444,
      "eval_samples_per_second": 36.488,
      "eval_steps_per_second": 1.216,
      "step": 38
    },
    {
      "epoch": 5.16,
      "grad_norm": 2.46492600440979,
      "learning_rate": 4.810606060606061e-05,
      "loss": 0.4786,
      "step": 40
    },
    {
      "epoch": 5.94,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 0.619183361530304,
      "eval_runtime": 1.6099,
      "eval_samples_per_second": 37.269,
      "eval_steps_per_second": 1.242,
      "step": 46
    },
    {
      "epoch": 6.45,
      "grad_norm": 2.310150384902954,
      "learning_rate": 4.621212121212121e-05,
      "loss": 0.2874,
      "step": 50
    },
    {
      "epoch": 6.97,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 0.6195262670516968,
      "eval_runtime": 1.7294,
      "eval_samples_per_second": 34.693,
      "eval_steps_per_second": 1.156,
      "step": 54
    },
    {
      "epoch": 7.74,
      "grad_norm": 2.976644277572632,
      "learning_rate": 4.431818181818182e-05,
      "loss": 0.2027,
      "step": 60
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 0.5958878993988037,
      "eval_runtime": 1.6615,
      "eval_samples_per_second": 36.113,
      "eval_steps_per_second": 1.204,
      "step": 62
    },
    {
      "epoch": 8.9,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 0.6104174852371216,
      "eval_runtime": 2.1769,
      "eval_samples_per_second": 27.562,
      "eval_steps_per_second": 0.919,
      "step": 69
    },
    {
      "epoch": 9.03,
      "grad_norm": 1.530189871788025,
      "learning_rate": 4.242424242424243e-05,
      "loss": 0.1662,
      "step": 70
    },
    {
      "epoch": 9.94,
      "eval_accuracy": 0.75,
      "eval_loss": 0.7297105193138123,
      "eval_runtime": 1.8244,
      "eval_samples_per_second": 32.887,
      "eval_steps_per_second": 1.096,
      "step": 77
    },
    {
      "epoch": 10.32,
      "grad_norm": 2.815868377685547,
      "learning_rate": 4.053030303030303e-05,
      "loss": 0.1462,
      "step": 80
    },
    {
      "epoch": 10.97,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 0.7851839065551758,
      "eval_runtime": 1.617,
      "eval_samples_per_second": 37.107,
      "eval_steps_per_second": 1.237,
      "step": 85
    },
    {
      "epoch": 11.61,
      "grad_norm": 3.3121185302734375,
      "learning_rate": 3.8636363636363636e-05,
      "loss": 0.1419,
      "step": 90
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.7166666666666667,
      "eval_loss": 0.8637493848800659,
      "eval_runtime": 1.7943,
      "eval_samples_per_second": 33.44,
      "eval_steps_per_second": 1.115,
      "step": 93
    },
    {
      "epoch": 12.9,
      "grad_norm": 1.8678604364395142,
      "learning_rate": 3.6742424242424246e-05,
      "loss": 0.1199,
      "step": 100
    },
    {
      "epoch": 12.9,
      "eval_accuracy": 0.7333333333333333,
      "eval_loss": 0.6797261834144592,
      "eval_runtime": 1.6329,
      "eval_samples_per_second": 36.745,
      "eval_steps_per_second": 1.225,
      "step": 100
    },
    {
      "epoch": 13.94,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 0.766004204750061,
      "eval_runtime": 2.1818,
      "eval_samples_per_second": 27.5,
      "eval_steps_per_second": 0.917,
      "step": 108
    },
    {
      "epoch": 14.19,
      "grad_norm": 1.7959256172180176,
      "learning_rate": 3.484848484848485e-05,
      "loss": 0.0949,
      "step": 110
    },
    {
      "epoch": 14.97,
      "eval_accuracy": 0.7166666666666667,
      "eval_loss": 0.7385975122451782,
      "eval_runtime": 1.63,
      "eval_samples_per_second": 36.809,
      "eval_steps_per_second": 1.227,
      "step": 116
    },
    {
      "epoch": 15.48,
      "grad_norm": 3.5541183948516846,
      "learning_rate": 3.295454545454545e-05,
      "loss": 0.0901,
      "step": 120
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.7,
      "eval_loss": 1.0125622749328613,
      "eval_runtime": 2.1501,
      "eval_samples_per_second": 27.905,
      "eval_steps_per_second": 0.93,
      "step": 124
    },
    {
      "epoch": 16.77,
      "grad_norm": 1.4506371021270752,
      "learning_rate": 3.106060606060606e-05,
      "loss": 0.0808,
      "step": 130
    },
    {
      "epoch": 16.9,
      "eval_accuracy": 0.8,
      "eval_loss": 0.7060254812240601,
      "eval_runtime": 1.656,
      "eval_samples_per_second": 36.232,
      "eval_steps_per_second": 1.208,
      "step": 131
    },
    {
      "epoch": 17.94,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 0.7856984734535217,
      "eval_runtime": 1.9667,
      "eval_samples_per_second": 30.508,
      "eval_steps_per_second": 1.017,
      "step": 139
    },
    {
      "epoch": 18.06,
      "grad_norm": 1.090044617652893,
      "learning_rate": 2.916666666666667e-05,
      "loss": 0.102,
      "step": 140
    },
    {
      "epoch": 18.97,
      "eval_accuracy": 0.8,
      "eval_loss": 0.741092324256897,
      "eval_runtime": 1.6707,
      "eval_samples_per_second": 35.913,
      "eval_steps_per_second": 1.197,
      "step": 147
    },
    {
      "epoch": 19.35,
      "grad_norm": 1.416364073753357,
      "learning_rate": 2.7272727272727273e-05,
      "loss": 0.0706,
      "step": 150
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.7340261340141296,
      "eval_runtime": 1.6384,
      "eval_samples_per_second": 36.622,
      "eval_steps_per_second": 1.221,
      "step": 155
    },
    {
      "epoch": 20.65,
      "grad_norm": 1.222724199295044,
      "learning_rate": 2.537878787878788e-05,
      "loss": 0.0582,
      "step": 160
    },
    {
      "epoch": 20.9,
      "eval_accuracy": 0.75,
      "eval_loss": 0.8588929772377014,
      "eval_runtime": 1.6461,
      "eval_samples_per_second": 36.45,
      "eval_steps_per_second": 1.215,
      "step": 162
    },
    {
      "epoch": 21.94,
      "grad_norm": 0.9567591547966003,
      "learning_rate": 2.3484848484848487e-05,
      "loss": 0.0687,
      "step": 170
    },
    {
      "epoch": 21.94,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 0.8546371459960938,
      "eval_runtime": 1.6207,
      "eval_samples_per_second": 37.022,
      "eval_steps_per_second": 1.234,
      "step": 170
    },
    {
      "epoch": 22.97,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 0.7760646939277649,
      "eval_runtime": 1.9412,
      "eval_samples_per_second": 30.909,
      "eval_steps_per_second": 1.03,
      "step": 178
    },
    {
      "epoch": 23.23,
      "grad_norm": 1.38846755027771,
      "learning_rate": 2.1590909090909093e-05,
      "loss": 0.0633,
      "step": 180
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 0.8112250566482544,
      "eval_runtime": 1.5795,
      "eval_samples_per_second": 37.988,
      "eval_steps_per_second": 1.266,
      "step": 186
    },
    {
      "epoch": 24.52,
      "grad_norm": 0.9209253191947937,
      "learning_rate": 1.9696969696969697e-05,
      "loss": 0.0626,
      "step": 190
    },
    {
      "epoch": 24.9,
      "eval_accuracy": 0.8333333333333334,
      "eval_loss": 0.6942941546440125,
      "eval_runtime": 1.6177,
      "eval_samples_per_second": 37.089,
      "eval_steps_per_second": 1.236,
      "step": 193
    },
    {
      "epoch": 25.81,
      "grad_norm": 0.7989098429679871,
      "learning_rate": 1.7803030303030303e-05,
      "loss": 0.0578,
      "step": 200
    },
    {
      "epoch": 25.94,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 0.8592663407325745,
      "eval_runtime": 1.6312,
      "eval_samples_per_second": 36.783,
      "eval_steps_per_second": 1.226,
      "step": 201
    },
    {
      "epoch": 26.97,
      "eval_accuracy": 0.85,
      "eval_loss": 0.7214663028717041,
      "eval_runtime": 1.6092,
      "eval_samples_per_second": 37.286,
      "eval_steps_per_second": 1.243,
      "step": 209
    },
    {
      "epoch": 27.1,
      "grad_norm": 1.5267945528030396,
      "learning_rate": 1.590909090909091e-05,
      "loss": 0.0434,
      "step": 210
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.8,
      "eval_loss": 0.8150014281272888,
      "eval_runtime": 1.5946,
      "eval_samples_per_second": 37.628,
      "eval_steps_per_second": 1.254,
      "step": 217
    },
    {
      "epoch": 28.39,
      "grad_norm": 1.5845617055892944,
      "learning_rate": 1.4015151515151515e-05,
      "loss": 0.0492,
      "step": 220
    },
    {
      "epoch": 28.9,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 0.7833768725395203,
      "eval_runtime": 1.622,
      "eval_samples_per_second": 36.992,
      "eval_steps_per_second": 1.233,
      "step": 224
    },
    {
      "epoch": 29.68,
      "grad_norm": 1.6783336400985718,
      "learning_rate": 1.2121212121212122e-05,
      "loss": 0.0582,
      "step": 230
    },
    {
      "epoch": 29.94,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 0.7843878269195557,
      "eval_runtime": 1.6057,
      "eval_samples_per_second": 37.367,
      "eval_steps_per_second": 1.246,
      "step": 232
    },
    {
      "epoch": 30.97,
      "grad_norm": 0.9550512433052063,
      "learning_rate": 1.0227272727272729e-05,
      "loss": 0.0515,
      "step": 240
    },
    {
      "epoch": 30.97,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 0.7972704172134399,
      "eval_runtime": 1.5833,
      "eval_samples_per_second": 37.895,
      "eval_steps_per_second": 1.263,
      "step": 240
    },
    {
      "epoch": 32.0,
      "eval_accuracy": 0.8,
      "eval_loss": 0.7744101285934448,
      "eval_runtime": 1.6139,
      "eval_samples_per_second": 37.177,
      "eval_steps_per_second": 1.239,
      "step": 248
    },
    {
      "epoch": 32.26,
      "grad_norm": 1.3412911891937256,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.0487,
      "step": 250
    },
    {
      "epoch": 32.9,
      "eval_accuracy": 0.75,
      "eval_loss": 0.8613999485969543,
      "eval_runtime": 1.6004,
      "eval_samples_per_second": 37.491,
      "eval_steps_per_second": 1.25,
      "step": 255
    },
    {
      "epoch": 33.55,
      "grad_norm": 1.1709736585617065,
      "learning_rate": 6.43939393939394e-06,
      "loss": 0.0455,
      "step": 260
    },
    {
      "epoch": 33.94,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 0.8194929957389832,
      "eval_runtime": 1.6082,
      "eval_samples_per_second": 37.309,
      "eval_steps_per_second": 1.244,
      "step": 263
    },
    {
      "epoch": 34.84,
      "grad_norm": 1.126597285270691,
      "learning_rate": 4.5454545454545455e-06,
      "loss": 0.0329,
      "step": 270
    },
    {
      "epoch": 34.97,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 0.832683801651001,
      "eval_runtime": 2.1485,
      "eval_samples_per_second": 27.927,
      "eval_steps_per_second": 0.931,
      "step": 271
    },
    {
      "epoch": 36.0,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 0.8889188170433044,
      "eval_runtime": 1.6066,
      "eval_samples_per_second": 37.345,
      "eval_steps_per_second": 1.245,
      "step": 279
    },
    {
      "epoch": 36.13,
      "grad_norm": 1.9044275283813477,
      "learning_rate": 2.651515151515152e-06,
      "loss": 0.0447,
      "step": 280
    },
    {
      "epoch": 36.9,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 0.8705342411994934,
      "eval_runtime": 1.6111,
      "eval_samples_per_second": 37.243,
      "eval_steps_per_second": 1.241,
      "step": 286
    },
    {
      "epoch": 37.42,
      "grad_norm": 1.5608246326446533,
      "learning_rate": 7.575757575757576e-07,
      "loss": 0.0445,
      "step": 290
    },
    {
      "epoch": 37.94,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 0.8694610595703125,
      "eval_runtime": 1.8543,
      "eval_samples_per_second": 32.356,
      "eval_steps_per_second": 1.079,
      "step": 294
    },
    {
      "epoch": 37.94,
      "step": 294,
      "total_flos": 2.864620236542755e+18,
      "train_loss": 0.20484183196510589,
      "train_runtime": 1635.0256,
      "train_samples_per_second": 25.02,
      "train_steps_per_second": 0.18
    }
  ],
  "logging_steps": 10,
  "max_steps": 294,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 42,
  "save_steps": 500,
  "total_flos": 2.864620236542755e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}