{
  "best_metric": 0.683539867401123,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 0.05995203836930456,
  "eval_steps": 25,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002398081534772182,
      "grad_norm": 0.30095940828323364,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.8193,
      "step": 1
    },
    {
      "epoch": 0.002398081534772182,
      "eval_loss": 0.7507125735282898,
      "eval_runtime": 0.7441,
      "eval_samples_per_second": 67.196,
      "eval_steps_per_second": 17.471,
      "step": 1
    },
    {
      "epoch": 0.004796163069544364,
      "grad_norm": 0.3760162591934204,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.7795,
      "step": 2
    },
    {
      "epoch": 0.007194244604316547,
      "grad_norm": 0.39668312668800354,
      "learning_rate": 0.0001,
      "loss": 0.8522,
      "step": 3
    },
    {
      "epoch": 0.009592326139088728,
      "grad_norm": 0.3185388743877411,
      "learning_rate": 9.997376600647783e-05,
      "loss": 0.8081,
      "step": 4
    },
    {
      "epoch": 0.011990407673860911,
      "grad_norm": 0.34676116704940796,
      "learning_rate": 9.989509461357426e-05,
      "loss": 0.76,
      "step": 5
    },
    {
      "epoch": 0.014388489208633094,
      "grad_norm": 0.3574373722076416,
      "learning_rate": 9.976407754861426e-05,
      "loss": 0.7665,
      "step": 6
    },
    {
      "epoch": 0.016786570743405275,
      "grad_norm": 0.33820533752441406,
      "learning_rate": 9.958086757163489e-05,
      "loss": 0.7747,
      "step": 7
    },
    {
      "epoch": 0.019184652278177457,
      "grad_norm": 0.33398616313934326,
      "learning_rate": 9.934567829727386e-05,
      "loss": 0.7295,
      "step": 8
    },
    {
      "epoch": 0.02158273381294964,
      "grad_norm": 0.34293198585510254,
      "learning_rate": 9.905878394570453e-05,
      "loss": 0.7783,
      "step": 9
    },
    {
      "epoch": 0.023980815347721823,
      "grad_norm": 0.33424755930900574,
      "learning_rate": 9.872051902290737e-05,
      "loss": 0.6896,
      "step": 10
    },
    {
      "epoch": 0.026378896882494004,
      "grad_norm": 0.3323270082473755,
      "learning_rate": 9.833127793065098e-05,
      "loss": 0.6557,
      "step": 11
    },
    {
      "epoch": 0.02877697841726619,
      "grad_norm": 0.37142112851142883,
      "learning_rate": 9.789151450663723e-05,
      "loss": 0.733,
      "step": 12
    },
    {
      "epoch": 0.03117505995203837,
      "grad_norm": 0.2586774528026581,
      "learning_rate": 9.740174149534693e-05,
      "loss": 0.7245,
      "step": 13
    },
    {
      "epoch": 0.03357314148681055,
      "grad_norm": 0.23137016594409943,
      "learning_rate": 9.686252995020249e-05,
      "loss": 0.8023,
      "step": 14
    },
    {
      "epoch": 0.03597122302158273,
      "grad_norm": 0.2453051507472992,
      "learning_rate": 9.627450856774539e-05,
      "loss": 0.8549,
      "step": 15
    },
    {
      "epoch": 0.03836930455635491,
      "grad_norm": 0.2271958887577057,
      "learning_rate": 9.563836295460398e-05,
      "loss": 0.7908,
      "step": 16
    },
    {
      "epoch": 0.0407673860911271,
      "grad_norm": 0.23564989864826202,
      "learning_rate": 9.495483482810688e-05,
      "loss": 0.8116,
      "step": 17
    },
    {
      "epoch": 0.04316546762589928,
      "grad_norm": 0.24254751205444336,
      "learning_rate": 9.422472115147382e-05,
      "loss": 0.8145,
      "step": 18
    },
    {
      "epoch": 0.045563549160671464,
      "grad_norm": 0.23949633538722992,
      "learning_rate": 9.3448873204592e-05,
      "loss": 0.7682,
      "step": 19
    },
    {
      "epoch": 0.047961630695443645,
      "grad_norm": 0.26809075474739075,
      "learning_rate": 9.2628195591462e-05,
      "loss": 0.7718,
      "step": 20
    },
    {
      "epoch": 0.050359712230215826,
      "grad_norm": 0.282449334859848,
      "learning_rate": 9.176364518546989e-05,
      "loss": 0.76,
      "step": 21
    },
    {
      "epoch": 0.05275779376498801,
      "grad_norm": 0.27863359451293945,
      "learning_rate": 9.08562300137157e-05,
      "loss": 0.6711,
      "step": 22
    },
    {
      "epoch": 0.05515587529976019,
      "grad_norm": 0.2779116630554199,
      "learning_rate": 8.990700808169889e-05,
      "loss": 0.7516,
      "step": 23
    },
    {
      "epoch": 0.05755395683453238,
      "grad_norm": 0.28714075684547424,
      "learning_rate": 8.891708613973126e-05,
      "loss": 0.6643,
      "step": 24
    },
    {
      "epoch": 0.05995203836930456,
      "grad_norm": 0.33895036578178406,
      "learning_rate": 8.788761839251559e-05,
      "loss": 0.7124,
      "step": 25
    },
    {
      "epoch": 0.05995203836930456,
      "eval_loss": 0.683539867401123,
      "eval_runtime": 0.7562,
      "eval_samples_per_second": 66.123,
      "eval_steps_per_second": 17.192,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 95,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.476965970214912e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}