{
"best_metric": 0.538160469667319,
"best_model_checkpoint": "tiny-bert-sst2-distilled/run-1/checkpoint-384",
"epoch": 8.0,
"eval_steps": 500,
"global_step": 768,
"is_hyper_param_search": true,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.0,
"grad_norm": 0.40490686893463135,
"learning_rate": 1.4953262133881219e-05,
"loss": 0.1703,
"step": 96
},
{
"epoch": 1.0,
"eval_accuracy": 0.5,
"eval_f1": 0.0,
"eval_loss": 0.11485283076763153,
"eval_precision": 0.0,
"eval_recall": 0.0,
"eval_runtime": 25.7126,
"eval_samples_per_second": 39.747,
"eval_steps_per_second": 1.245,
"step": 96
},
{
"epoch": 2.0,
"grad_norm": 0.2503454089164734,
"learning_rate": 1.2817081829041045e-05,
"loss": 0.1095,
"step": 192
},
{
"epoch": 2.0,
"eval_accuracy": 0.5,
"eval_f1": 0.0038986354775828462,
"eval_loss": 0.0978715717792511,
"eval_precision": 0.5,
"eval_recall": 0.0019569471624266144,
"eval_runtime": 25.921,
"eval_samples_per_second": 39.427,
"eval_steps_per_second": 1.235,
"step": 192
},
{
"epoch": 3.0,
"grad_norm": 0.29292032122612,
"learning_rate": 1.0680901524200872e-05,
"loss": 0.0978,
"step": 288
},
{
"epoch": 3.0,
"eval_accuracy": 0.5352250489236791,
"eval_f1": 0.18244406196213422,
"eval_loss": 0.0902138277888298,
"eval_precision": 0.7571428571428571,
"eval_recall": 0.10371819960861056,
"eval_runtime": 25.3689,
"eval_samples_per_second": 40.286,
"eval_steps_per_second": 1.261,
"step": 288
},
{
"epoch": 4.0,
"grad_norm": 0.24781617522239685,
"learning_rate": 8.544721219360697e-06,
"loss": 0.0933,
"step": 384
},
{
"epoch": 4.0,
"eval_accuracy": 0.538160469667319,
"eval_f1": 0.1945392491467577,
"eval_loss": 0.08735258132219315,
"eval_precision": 0.76,
"eval_recall": 0.11154598825831702,
"eval_runtime": 25.5717,
"eval_samples_per_second": 39.966,
"eval_steps_per_second": 1.251,
"step": 384
},
{
"epoch": 5.0,
"grad_norm": 0.3007850646972656,
"learning_rate": 6.408540914520523e-06,
"loss": 0.0921,
"step": 480
},
{
"epoch": 5.0,
"eval_accuracy": 0.538160469667319,
"eval_f1": 0.1945392491467577,
"eval_loss": 0.08602822571992874,
"eval_precision": 0.76,
"eval_recall": 0.11154598825831702,
"eval_runtime": 26.0781,
"eval_samples_per_second": 39.19,
"eval_steps_per_second": 1.227,
"step": 480
},
{
"epoch": 6.0,
"grad_norm": 0.2958889305591583,
"learning_rate": 4.272360609680348e-06,
"loss": 0.0907,
"step": 576
},
{
"epoch": 6.0,
"eval_accuracy": 0.538160469667319,
"eval_f1": 0.1945392491467577,
"eval_loss": 0.08512929081916809,
"eval_precision": 0.76,
"eval_recall": 0.11154598825831702,
"eval_runtime": 25.8498,
"eval_samples_per_second": 39.536,
"eval_steps_per_second": 1.238,
"step": 576
},
{
"epoch": 7.0,
"grad_norm": 0.4746672511100769,
"learning_rate": 2.136180304840174e-06,
"loss": 0.0902,
"step": 672
},
{
"epoch": 7.0,
"eval_accuracy": 0.538160469667319,
"eval_f1": 0.1945392491467577,
"eval_loss": 0.0847223699092865,
"eval_precision": 0.76,
"eval_recall": 0.11154598825831702,
"eval_runtime": 25.8223,
"eval_samples_per_second": 39.578,
"eval_steps_per_second": 1.239,
"step": 672
},
{
"epoch": 8.0,
"grad_norm": 0.3903367817401886,
"learning_rate": 0.0,
"loss": 0.0897,
"step": 768
},
{
"epoch": 8.0,
"eval_accuracy": 0.538160469667319,
"eval_f1": 0.1945392491467577,
"eval_loss": 0.0845729261636734,
"eval_precision": 0.76,
"eval_recall": 0.11154598825831702,
"eval_runtime": 26.4963,
"eval_samples_per_second": 38.571,
"eval_steps_per_second": 1.208,
"step": 768
}
],
"logging_steps": 500,
"max_steps": 768,
"num_input_tokens_seen": 0,
"num_train_epochs": 8,
"save_steps": 500,
"total_flos": 1885561578240.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": {
"alpha": 0.09656764013507046,
"learning_rate": 1.7089442438721394e-05,
"num_train_epochs": 8,
"temperature": 20
}
}