{
"best_metric": 0.02853687417768363,
"best_model_checkpoint": "./checkpoints/easyrec-large",
"epoch": 2.588844433984467,
"eval_steps": 1000,
"global_step": 11000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11767474699929395,
"grad_norm": 99.0,
"learning_rate": 4.803875421667844e-05,
"loss": 2.8035,
"step": 500
},
{
"epoch": 0.2353494939985879,
"grad_norm": 120.5,
"learning_rate": 4.607750843335687e-05,
"loss": 2.3662,
"step": 1000
},
{
"epoch": 0.3530242409978819,
"grad_norm": 46.0,
"learning_rate": 4.411626265003531e-05,
"loss": 2.2534,
"step": 1500
},
{
"epoch": 0.4706989879971758,
"grad_norm": 41.5,
"learning_rate": 4.215501686671374e-05,
"loss": 2.1194,
"step": 2000
},
{
"epoch": 0.5883737349964697,
"grad_norm": 56.5,
"learning_rate": 4.019377108339217e-05,
"loss": 2.0655,
"step": 2500
},
{
"epoch": 0.7060484819957638,
"grad_norm": 56.25,
"learning_rate": 3.82325253000706e-05,
"loss": 2.084,
"step": 3000
},
{
"epoch": 0.8237232289950577,
"grad_norm": 44.0,
"learning_rate": 3.627127951674904e-05,
"loss": 2.0148,
"step": 3500
},
{
"epoch": 0.9413979759943516,
"grad_norm": 43.0,
"learning_rate": 3.431003373342748e-05,
"loss": 2.0612,
"step": 4000
},
{
"epoch": 1.0590727229936456,
"grad_norm": 80.5,
"learning_rate": 3.234878795010591e-05,
"loss": 1.9376,
"step": 4500
},
{
"epoch": 1.1767474699929394,
"grad_norm": 48.25,
"learning_rate": 3.0387542166784343e-05,
"loss": 1.9588,
"step": 5000
},
{
"epoch": 1.2944222169922335,
"grad_norm": 67.5,
"learning_rate": 2.8426296383462774e-05,
"loss": 1.9291,
"step": 5500
},
{
"epoch": 1.4120969639915275,
"grad_norm": 75.5,
"learning_rate": 2.6465050600141212e-05,
"loss": 1.9643,
"step": 6000
},
{
"epoch": 1.5297717109908213,
"grad_norm": 45.75,
"learning_rate": 2.4503804816819646e-05,
"loss": 1.9045,
"step": 6500
},
{
"epoch": 1.6474464579901154,
"grad_norm": 44.5,
"learning_rate": 2.2542559033498077e-05,
"loss": 1.9263,
"step": 7000
},
{
"epoch": 1.7651212049894092,
"grad_norm": 56.25,
"learning_rate": 2.0581313250176512e-05,
"loss": 1.9065,
"step": 7500
},
{
"epoch": 1.8827959519887032,
"grad_norm": 44.75,
"learning_rate": 1.862006746685495e-05,
"loss": 1.9187,
"step": 8000
},
{
"epoch": 2.0004706989879972,
"grad_norm": 57.0,
"learning_rate": 1.665882168353338e-05,
"loss": 1.9198,
"step": 8500
},
{
"epoch": 2.1181454459872913,
"grad_norm": 49.5,
"learning_rate": 1.4697575900211815e-05,
"loss": 1.9371,
"step": 9000
},
{
"epoch": 2.2358201929865853,
"grad_norm": 45.0,
"learning_rate": 1.273633011689025e-05,
"loss": 1.8927,
"step": 9500
},
{
"epoch": 2.353494939985879,
"grad_norm": 45.0,
"learning_rate": 1.0775084333568684e-05,
"loss": 1.8996,
"step": 10000
},
{
"epoch": 2.471169686985173,
"grad_norm": 34.75,
"learning_rate": 8.813838550247118e-06,
"loss": 1.8713,
"step": 10500
},
{
"epoch": 2.588844433984467,
"grad_norm": 37.75,
"learning_rate": 6.852592766925552e-06,
"loss": 1.9081,
"step": 11000
}
],
"logging_steps": 500,
"max_steps": 12747,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}