{
  "best_metric": 2.003491163253784,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.014900908955446282,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00029801817910892565,
      "grad_norm": 1.4378103017807007,
      "learning_rate": 5e-05,
      "loss": 2.7429,
      "step": 1
    },
    {
      "epoch": 0.00029801817910892565,
      "eval_loss": 2.89186429977417,
      "eval_runtime": 2.9563,
      "eval_samples_per_second": 16.913,
      "eval_steps_per_second": 4.397,
      "step": 1
    },
    {
      "epoch": 0.0005960363582178513,
      "grad_norm": 1.5283856391906738,
      "learning_rate": 0.0001,
      "loss": 2.7683,
      "step": 2
    },
    {
      "epoch": 0.000894054537326777,
      "grad_norm": 1.417280673980713,
      "learning_rate": 9.990365154573717e-05,
      "loss": 2.7067,
      "step": 3
    },
    {
      "epoch": 0.0011920727164357026,
      "grad_norm": 0.8284478187561035,
      "learning_rate": 9.961501876182148e-05,
      "loss": 2.5004,
      "step": 4
    },
    {
      "epoch": 0.0014900908955446282,
      "grad_norm": 1.5029010772705078,
      "learning_rate": 9.913533761814537e-05,
      "loss": 2.5042,
      "step": 5
    },
    {
      "epoch": 0.001788109074653554,
      "grad_norm": 1.4401094913482666,
      "learning_rate": 9.846666218300807e-05,
      "loss": 2.4484,
      "step": 6
    },
    {
      "epoch": 0.0020861272537624795,
      "grad_norm": 1.0341869592666626,
      "learning_rate": 9.761185582727977e-05,
      "loss": 2.384,
      "step": 7
    },
    {
      "epoch": 0.0023841454328714052,
      "grad_norm": 0.655907928943634,
      "learning_rate": 9.657457896300791e-05,
      "loss": 2.3202,
      "step": 8
    },
    {
      "epoch": 0.002682163611980331,
      "grad_norm": 0.5951255559921265,
      "learning_rate": 9.535927336897098e-05,
      "loss": 2.3046,
      "step": 9
    },
    {
      "epoch": 0.0029801817910892563,
      "grad_norm": 0.6929669380187988,
      "learning_rate": 9.397114317029975e-05,
      "loss": 2.269,
      "step": 10
    },
    {
      "epoch": 0.003278199970198182,
      "grad_norm": 0.7978423833847046,
      "learning_rate": 9.241613255361455e-05,
      "loss": 2.3061,
      "step": 11
    },
    {
      "epoch": 0.003576218149307108,
      "grad_norm": 0.6575497388839722,
      "learning_rate": 9.070090031310558e-05,
      "loss": 2.2067,
      "step": 12
    },
    {
      "epoch": 0.003874236328416033,
      "grad_norm": 0.5472825169563293,
      "learning_rate": 8.883279133655399e-05,
      "loss": 2.3087,
      "step": 13
    },
    {
      "epoch": 0.004172254507524959,
      "grad_norm": 0.5186141729354858,
      "learning_rate": 8.681980515339464e-05,
      "loss": 2.1952,
      "step": 14
    },
    {
      "epoch": 0.004470272686633884,
      "grad_norm": 0.517930269241333,
      "learning_rate": 8.467056167950311e-05,
      "loss": 2.1748,
      "step": 15
    },
    {
      "epoch": 0.0047682908657428105,
      "grad_norm": 0.49700576066970825,
      "learning_rate": 8.239426430539243e-05,
      "loss": 2.1844,
      "step": 16
    },
    {
      "epoch": 0.005066309044851736,
      "grad_norm": 0.5437249541282654,
      "learning_rate": 8.000066048588211e-05,
      "loss": 2.0521,
      "step": 17
    },
    {
      "epoch": 0.005364327223960662,
      "grad_norm": 0.48748552799224854,
      "learning_rate": 7.75e-05,
      "loss": 2.1151,
      "step": 18
    },
    {
      "epoch": 0.005662345403069587,
      "grad_norm": 0.5019696950912476,
      "learning_rate": 7.490299105985507e-05,
      "loss": 2.0675,
      "step": 19
    },
    {
      "epoch": 0.005960363582178513,
      "grad_norm": 0.48214608430862427,
      "learning_rate": 7.222075445642904e-05,
      "loss": 2.0507,
      "step": 20
    },
    {
      "epoch": 0.006258381761287439,
      "grad_norm": 0.46700602769851685,
      "learning_rate": 6.946477593864228e-05,
      "loss": 2.0569,
      "step": 21
    },
    {
      "epoch": 0.006556399940396364,
      "grad_norm": 0.49726152420043945,
      "learning_rate": 6.664685702961344e-05,
      "loss": 2.1015,
      "step": 22
    },
    {
      "epoch": 0.0068544181195052895,
      "grad_norm": 0.4804721474647522,
      "learning_rate": 6.377906449072578e-05,
      "loss": 2.0625,
      "step": 23
    },
    {
      "epoch": 0.007152436298614216,
      "grad_norm": 0.5244660377502441,
      "learning_rate": 6.087367864990233e-05,
      "loss": 2.0452,
      "step": 24
    },
    {
      "epoch": 0.007450454477723141,
      "grad_norm": 0.5558761954307556,
      "learning_rate": 5.794314081535644e-05,
      "loss": 2.0891,
      "step": 25
    },
    {
      "epoch": 0.007450454477723141,
      "eval_loss": 2.087225914001465,
      "eval_runtime": 2.3411,
      "eval_samples_per_second": 21.358,
      "eval_steps_per_second": 5.553,
      "step": 25
    },
    {
      "epoch": 0.007748472656832066,
      "grad_norm": 0.47236016392707825,
      "learning_rate": 5.500000000000001e-05,
      "loss": 2.156,
      "step": 26
    },
    {
      "epoch": 0.008046490835940992,
      "grad_norm": 0.5345865488052368,
      "learning_rate": 5.205685918464356e-05,
      "loss": 2.1204,
      "step": 27
    },
    {
      "epoch": 0.008344509015049918,
      "grad_norm": 0.4965778887271881,
      "learning_rate": 4.912632135009769e-05,
      "loss": 2.0456,
      "step": 28
    },
    {
      "epoch": 0.008642527194158844,
      "grad_norm": 0.45029252767562866,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 2.0003,
      "step": 29
    },
    {
      "epoch": 0.008940545373267769,
      "grad_norm": 0.4446468949317932,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 2.0242,
      "step": 30
    },
    {
      "epoch": 0.009238563552376695,
      "grad_norm": 0.48782452940940857,
      "learning_rate": 4.053522406135775e-05,
      "loss": 1.9474,
      "step": 31
    },
    {
      "epoch": 0.009536581731485621,
      "grad_norm": 0.46959659457206726,
      "learning_rate": 3.777924554357096e-05,
      "loss": 1.9684,
      "step": 32
    },
    {
      "epoch": 0.009834599910594547,
      "grad_norm": 0.4955771565437317,
      "learning_rate": 3.509700894014496e-05,
      "loss": 2.0053,
      "step": 33
    },
    {
      "epoch": 0.010132618089703472,
      "grad_norm": 0.4691748023033142,
      "learning_rate": 3.250000000000001e-05,
      "loss": 1.9942,
      "step": 34
    },
    {
      "epoch": 0.010430636268812398,
      "grad_norm": 0.5127565264701843,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 2.0143,
      "step": 35
    },
    {
      "epoch": 0.010728654447921324,
      "grad_norm": 0.516806423664093,
      "learning_rate": 2.760573569460757e-05,
      "loss": 2.0691,
      "step": 36
    },
    {
      "epoch": 0.011026672627030248,
      "grad_norm": 0.533613920211792,
      "learning_rate": 2.53294383204969e-05,
      "loss": 2.0478,
      "step": 37
    },
    {
      "epoch": 0.011324690806139175,
      "grad_norm": 0.42579934000968933,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 2.0956,
      "step": 38
    },
    {
      "epoch": 0.0116227089852481,
      "grad_norm": 0.4248853623867035,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 2.1222,
      "step": 39
    },
    {
      "epoch": 0.011920727164357025,
      "grad_norm": 0.41036468744277954,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 2.0217,
      "step": 40
    },
    {
      "epoch": 0.012218745343465951,
      "grad_norm": 0.42496877908706665,
      "learning_rate": 1.758386744638546e-05,
      "loss": 2.0106,
      "step": 41
    },
    {
      "epoch": 0.012516763522574878,
      "grad_norm": 0.4574867784976959,
      "learning_rate": 1.602885682970026e-05,
      "loss": 1.9419,
      "step": 42
    },
    {
      "epoch": 0.012814781701683802,
      "grad_norm": 0.4341859221458435,
      "learning_rate": 1.464072663102903e-05,
      "loss": 1.9376,
      "step": 43
    },
    {
      "epoch": 0.013112799880792728,
      "grad_norm": 0.40266716480255127,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 2.0011,
      "step": 44
    },
    {
      "epoch": 0.013410818059901655,
      "grad_norm": 0.4303281009197235,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 1.998,
      "step": 45
    },
    {
      "epoch": 0.013708836239010579,
      "grad_norm": 0.4447619616985321,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 1.999,
      "step": 46
    },
    {
      "epoch": 0.014006854418119505,
      "grad_norm": 0.4733419716358185,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 2.0109,
      "step": 47
    },
    {
      "epoch": 0.014304872597228431,
      "grad_norm": 0.5527592301368713,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 1.956,
      "step": 48
    },
    {
      "epoch": 0.014602890776337356,
      "grad_norm": 0.5123348832130432,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 1.9812,
      "step": 49
    },
    {
      "epoch": 0.014900908955446282,
      "grad_norm": 0.5983260273933411,
      "learning_rate": 1e-05,
      "loss": 2.1283,
      "step": 50
    },
    {
      "epoch": 0.014900908955446282,
      "eval_loss": 2.003491163253784,
      "eval_runtime": 2.3523,
      "eval_samples_per_second": 21.256,
      "eval_steps_per_second": 5.527,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.969951863666115e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}