{
  "best_metric": 1.4174914360046387,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.08643042350907519,
  "eval_steps": 50,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0005762028233938346,
      "eval_loss": 2.759286642074585,
      "eval_runtime": 44.2859,
      "eval_samples_per_second": 16.506,
      "eval_steps_per_second": 4.132,
      "step": 1
    },
    {
      "epoch": 0.005762028233938346,
      "grad_norm": 0.6934571266174316,
      "learning_rate": 4.3e-05,
      "loss": 2.2019,
      "step": 10
    },
    {
      "epoch": 0.011524056467876692,
      "grad_norm": 1.1018927097320557,
      "learning_rate": 8.6e-05,
      "loss": 2.072,
      "step": 20
    },
    {
      "epoch": 0.01728608470181504,
      "grad_norm": 1.6100819110870361,
      "learning_rate": 0.000129,
      "loss": 1.4799,
      "step": 30
    },
    {
      "epoch": 0.023048112935753384,
      "grad_norm": 1.5464179515838623,
      "learning_rate": 0.000172,
      "loss": 1.2626,
      "step": 40
    },
    {
      "epoch": 0.02881014116969173,
      "grad_norm": 1.7817376852035522,
      "learning_rate": 0.000215,
      "loss": 1.0018,
      "step": 50
    },
    {
      "epoch": 0.02881014116969173,
      "eval_loss": 1.4174914360046387,
      "eval_runtime": 44.7179,
      "eval_samples_per_second": 16.347,
      "eval_steps_per_second": 4.092,
      "step": 50
    },
    {
      "epoch": 0.03457216940363008,
      "grad_norm": 0.6755384802818298,
      "learning_rate": 0.0002147381354029311,
      "loss": 1.8599,
      "step": 60
    },
    {
      "epoch": 0.040334197637568424,
      "grad_norm": 0.9027474522590637,
      "learning_rate": 0.0002139538173897188,
      "loss": 1.5252,
      "step": 70
    },
    {
      "epoch": 0.04609622587150677,
      "grad_norm": 1.0059105157852173,
      "learning_rate": 0.0002126508670788841,
      "loss": 1.2772,
      "step": 80
    },
    {
      "epoch": 0.05185825410544512,
      "grad_norm": 0.9736582636833191,
      "learning_rate": 0.00021083563231336926,
      "loss": 1.0586,
      "step": 90
    },
    {
      "epoch": 0.05762028233938346,
      "grad_norm": 1.0923612117767334,
      "learning_rate": 0.00020851695673448515,
      "loss": 0.7892,
      "step": 100
    },
    {
      "epoch": 0.05762028233938346,
      "eval_loss": 1.486754298210144,
      "eval_runtime": 44.9804,
      "eval_samples_per_second": 16.252,
      "eval_steps_per_second": 4.068,
      "step": 100
    },
    {
      "epoch": 0.0633823105733218,
      "grad_norm": 0.610034704208374,
      "learning_rate": 0.00020570613669657956,
      "loss": 1.7989,
      "step": 110
    },
    {
      "epoch": 0.06914433880726016,
      "grad_norm": 0.7986786365509033,
      "learning_rate": 0.00020241686623233464,
      "loss": 1.5293,
      "step": 120
    },
    {
      "epoch": 0.0749063670411985,
      "grad_norm": 0.9212441444396973,
      "learning_rate": 0.00019866517033681577,
      "loss": 1.2778,
      "step": 130
    },
    {
      "epoch": 0.08066839527513685,
      "grad_norm": 0.9731055498123169,
      "learning_rate": 0.00019446932689530684,
      "loss": 1.0822,
      "step": 140
    },
    {
      "epoch": 0.08643042350907519,
      "grad_norm": 1.488525390625,
      "learning_rate": 0.0001898497776352901,
      "loss": 0.8345,
      "step": 150
    },
    {
      "epoch": 0.08643042350907519,
      "eval_loss": 1.5110596418380737,
      "eval_runtime": 45.1761,
      "eval_samples_per_second": 16.181,
      "eval_steps_per_second": 4.051,
      "step": 150
    }
  ],
  "logging_steps": 10,
  "max_steps": 500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 2
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.708774936641536e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}