{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9972041006523766,
  "eval_steps": 500,
  "global_step": 201,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 2e-05,
      "loss": 2.5636,
      "step": 2
    },
    {
      "epoch": 0.06,
      "learning_rate": 2e-05,
      "loss": 1.8753,
      "step": 4
    },
    {
      "epoch": 0.09,
      "learning_rate": 2e-05,
      "loss": 2.041,
      "step": 6
    },
    {
      "epoch": 0.12,
      "learning_rate": 2e-05,
      "loss": 1.8559,
      "step": 8
    },
    {
      "epoch": 0.15,
      "learning_rate": 2e-05,
      "loss": 1.4474,
      "step": 10
    },
    {
      "epoch": 0.18,
      "learning_rate": 2e-05,
      "loss": 1.7762,
      "step": 12
    },
    {
      "epoch": 0.21,
      "learning_rate": 2e-05,
      "loss": 2.2098,
      "step": 14
    },
    {
      "epoch": 0.24,
      "learning_rate": 2e-05,
      "loss": 2.4049,
      "step": 16
    },
    {
      "epoch": 0.27,
      "learning_rate": 2e-05,
      "loss": 2.0104,
      "step": 18
    },
    {
      "epoch": 0.3,
      "learning_rate": 2e-05,
      "loss": 1.7537,
      "step": 20
    },
    {
      "epoch": 0.33,
      "learning_rate": 2e-05,
      "loss": 1.9272,
      "step": 22
    },
    {
      "epoch": 0.36,
      "learning_rate": 2e-05,
      "loss": 1.8277,
      "step": 24
    },
    {
      "epoch": 0.39,
      "learning_rate": 2e-05,
      "loss": 1.6745,
      "step": 26
    },
    {
      "epoch": 0.42,
      "learning_rate": 2e-05,
      "loss": 1.8943,
      "step": 28
    },
    {
      "epoch": 0.45,
      "learning_rate": 2e-05,
      "loss": 2.3391,
      "step": 30
    },
    {
      "epoch": 0.48,
      "learning_rate": 2e-05,
      "loss": 2.2157,
      "step": 32
    },
    {
      "epoch": 0.51,
      "learning_rate": 2e-05,
      "loss": 2.419,
      "step": 34
    },
    {
      "epoch": 0.54,
      "learning_rate": 2e-05,
      "loss": 2.0958,
      "step": 36
    },
    {
      "epoch": 0.57,
      "learning_rate": 2e-05,
      "loss": 1.9335,
      "step": 38
    },
    {
      "epoch": 0.6,
      "learning_rate": 2e-05,
      "loss": 1.8017,
      "step": 40
    },
    {
      "epoch": 0.63,
      "learning_rate": 2e-05,
      "loss": 1.5031,
      "step": 42
    },
    {
      "epoch": 0.66,
      "learning_rate": 2e-05,
      "loss": 1.7947,
      "step": 44
    },
    {
      "epoch": 0.69,
      "learning_rate": 2e-05,
      "loss": 2.099,
      "step": 46
    },
    {
      "epoch": 0.72,
      "learning_rate": 2e-05,
      "loss": 1.9099,
      "step": 48
    },
    {
      "epoch": 0.75,
      "learning_rate": 2e-05,
      "loss": 2.5796,
      "step": 50
    },
    {
      "epoch": 0.78,
      "learning_rate": 2e-05,
      "loss": 1.8187,
      "step": 52
    },
    {
      "epoch": 0.81,
      "learning_rate": 2e-05,
      "loss": 1.7782,
      "step": 54
    },
    {
      "epoch": 0.84,
      "learning_rate": 2e-05,
      "loss": 1.8946,
      "step": 56
    },
    {
      "epoch": 0.86,
      "learning_rate": 2e-05,
      "loss": 1.6698,
      "step": 58
    },
    {
      "epoch": 0.89,
      "learning_rate": 2e-05,
      "loss": 1.5231,
      "step": 60
    },
    {
      "epoch": 0.92,
      "learning_rate": 2e-05,
      "loss": 1.6646,
      "step": 62
    },
    {
      "epoch": 0.95,
      "learning_rate": 2e-05,
      "loss": 1.8845,
      "step": 64
    },
    {
      "epoch": 0.98,
      "learning_rate": 2e-05,
      "loss": 1.6904,
      "step": 66
    },
    {
      "epoch": 1.01,
      "learning_rate": 2e-05,
      "loss": 2.212,
      "step": 68
    },
    {
      "epoch": 1.04,
      "learning_rate": 2e-05,
      "loss": 1.7614,
      "step": 70
    },
    {
      "epoch": 1.07,
      "learning_rate": 2e-05,
      "loss": 1.7902,
      "step": 72
    },
    {
      "epoch": 1.1,
      "learning_rate": 2e-05,
      "loss": 1.9249,
      "step": 74
    },
    {
      "epoch": 1.13,
      "learning_rate": 2e-05,
      "loss": 1.5314,
      "step": 76
    },
    {
      "epoch": 1.16,
      "learning_rate": 2e-05,
      "loss": 1.4637,
      "step": 78
    },
    {
      "epoch": 1.19,
      "learning_rate": 2e-05,
      "loss": 1.6649,
      "step": 80
    },
    {
      "epoch": 1.22,
      "learning_rate": 2e-05,
      "loss": 2.007,
      "step": 82
    },
    {
      "epoch": 1.25,
      "learning_rate": 2e-05,
      "loss": 1.7585,
      "step": 84
    },
    {
      "epoch": 1.28,
      "learning_rate": 2e-05,
      "loss": 2.006,
      "step": 86
    },
    {
      "epoch": 1.31,
      "learning_rate": 2e-05,
      "loss": 1.7321,
      "step": 88
    },
    {
      "epoch": 1.34,
      "learning_rate": 2e-05,
      "loss": 1.8055,
      "step": 90
    },
    {
      "epoch": 1.37,
      "learning_rate": 2e-05,
      "loss": 1.7243,
      "step": 92
    },
    {
      "epoch": 1.4,
      "learning_rate": 2e-05,
      "loss": 1.4223,
      "step": 94
    },
    {
      "epoch": 1.43,
      "learning_rate": 2e-05,
      "loss": 1.6713,
      "step": 96
    },
    {
      "epoch": 1.46,
      "learning_rate": 2e-05,
      "loss": 1.7817,
      "step": 98
    },
    {
      "epoch": 1.49,
      "learning_rate": 2e-05,
      "loss": 1.5739,
      "step": 100
    },
    {
      "epoch": 1.52,
      "learning_rate": 2e-05,
      "loss": 2.503,
      "step": 102
    },
    {
      "epoch": 1.55,
      "learning_rate": 2e-05,
      "loss": 1.7568,
      "step": 104
    },
    {
      "epoch": 1.58,
      "learning_rate": 2e-05,
      "loss": 1.764,
      "step": 106
    },
    {
      "epoch": 1.61,
      "learning_rate": 2e-05,
      "loss": 1.7051,
      "step": 108
    },
    {
      "epoch": 1.64,
      "learning_rate": 2e-05,
      "loss": 1.2782,
      "step": 110
    },
    {
      "epoch": 1.67,
      "learning_rate": 2e-05,
      "loss": 1.6762,
      "step": 112
    },
    {
      "epoch": 1.7,
      "learning_rate": 2e-05,
      "loss": 1.9468,
      "step": 114
    },
    {
      "epoch": 1.73,
      "learning_rate": 2e-05,
      "loss": 1.8962,
      "step": 116
    },
    {
      "epoch": 1.76,
      "learning_rate": 2e-05,
      "loss": 2.3161,
      "step": 118
    },
    {
      "epoch": 1.79,
      "learning_rate": 2e-05,
      "loss": 1.7126,
      "step": 120
    },
    {
      "epoch": 1.82,
      "learning_rate": 2e-05,
      "loss": 1.7572,
      "step": 122
    },
    {
      "epoch": 1.85,
      "learning_rate": 2e-05,
      "loss": 1.5726,
      "step": 124
    },
    {
      "epoch": 1.88,
      "learning_rate": 2e-05,
      "loss": 1.2902,
      "step": 126
    },
    {
      "epoch": 1.91,
      "learning_rate": 2e-05,
      "loss": 1.7416,
      "step": 128
    },
    {
      "epoch": 1.94,
      "learning_rate": 2e-05,
      "loss": 1.8538,
      "step": 130
    },
    {
      "epoch": 1.97,
      "learning_rate": 2e-05,
      "loss": 1.2801,
      "step": 132
    },
    {
      "epoch": 2.0,
      "learning_rate": 2e-05,
      "loss": 1.6297,
      "step": 134
    },
    {
      "epoch": 2.03,
      "learning_rate": 2e-05,
      "loss": 2.2701,
      "step": 136
    },
    {
      "epoch": 2.06,
      "learning_rate": 2e-05,
      "loss": 1.7373,
      "step": 138
    },
    {
      "epoch": 2.09,
      "learning_rate": 2e-05,
      "loss": 1.8102,
      "step": 140
    },
    {
      "epoch": 2.12,
      "learning_rate": 2e-05,
      "loss": 1.659,
      "step": 142
    },
    {
      "epoch": 2.15,
      "learning_rate": 2e-05,
      "loss": 1.5476,
      "step": 144
    },
    {
      "epoch": 2.18,
      "learning_rate": 2e-05,
      "loss": 1.4895,
      "step": 146
    },
    {
      "epoch": 2.21,
      "learning_rate": 2e-05,
      "loss": 1.6523,
      "step": 148
    },
    {
      "epoch": 2.24,
      "learning_rate": 2e-05,
      "loss": 1.4569,
      "step": 150
    },
    {
      "epoch": 2.27,
      "learning_rate": 2e-05,
      "loss": 2.4678,
      "step": 152
    },
    {
      "epoch": 2.3,
      "learning_rate": 2e-05,
      "loss": 1.9758,
      "step": 154
    },
    {
      "epoch": 2.33,
      "learning_rate": 2e-05,
      "loss": 1.7601,
      "step": 156
    },
    {
      "epoch": 2.36,
      "learning_rate": 2e-05,
      "loss": 1.8439,
      "step": 158
    },
    {
      "epoch": 2.39,
      "learning_rate": 2e-05,
      "loss": 1.392,
      "step": 160
    },
    {
      "epoch": 2.42,
      "learning_rate": 2e-05,
      "loss": 1.5195,
      "step": 162
    },
    {
      "epoch": 2.45,
      "learning_rate": 2e-05,
      "loss": 1.5823,
      "step": 164
    },
    {
      "epoch": 2.48,
      "learning_rate": 2e-05,
      "loss": 1.534,
      "step": 166
    },
    {
      "epoch": 2.51,
      "learning_rate": 2e-05,
      "loss": 2.0172,
      "step": 168
    },
    {
      "epoch": 2.53,
      "learning_rate": 2e-05,
      "loss": 1.6927,
      "step": 170
    },
    {
      "epoch": 2.56,
      "learning_rate": 2e-05,
      "loss": 1.8205,
      "step": 172
    },
    {
      "epoch": 2.59,
      "learning_rate": 2e-05,
      "loss": 1.8063,
      "step": 174
    },
    {
      "epoch": 2.62,
      "learning_rate": 2e-05,
      "loss": 1.3506,
      "step": 176
    },
    {
      "epoch": 2.65,
      "learning_rate": 2e-05,
      "loss": 1.3634,
      "step": 178
    },
    {
      "epoch": 2.68,
      "learning_rate": 2e-05,
      "loss": 1.6887,
      "step": 180
    },
    {
      "epoch": 2.71,
      "learning_rate": 2e-05,
      "loss": 1.6399,
      "step": 182
    },
    {
      "epoch": 2.74,
      "learning_rate": 2e-05,
      "loss": 2.5611,
      "step": 184
    },
    {
      "epoch": 2.77,
      "learning_rate": 2e-05,
      "loss": 1.7254,
      "step": 186
    },
    {
      "epoch": 2.8,
      "learning_rate": 2e-05,
      "loss": 1.8264,
      "step": 188
    },
    {
      "epoch": 2.83,
      "learning_rate": 2e-05,
      "loss": 1.6687,
      "step": 190
    },
    {
      "epoch": 2.86,
      "learning_rate": 2e-05,
      "loss": 1.1035,
      "step": 192
    },
    {
      "epoch": 2.89,
      "learning_rate": 2e-05,
      "loss": 1.6223,
      "step": 194
    },
    {
      "epoch": 2.92,
      "learning_rate": 2e-05,
      "loss": 1.7795,
      "step": 196
    },
    {
      "epoch": 2.95,
      "learning_rate": 2e-05,
      "loss": 1.4972,
      "step": 198
    },
    {
      "epoch": 2.98,
      "learning_rate": 2e-05,
      "loss": 1.3781,
      "step": 200
    }
  ],
  "logging_steps": 2,
  "max_steps": 201,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 1.4137758692634624e+16,
  "trial_name": null,
  "trial_params": null
}