{
"best_metric": 3.401686906814575,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 2.0373831775700935,
"eval_steps": 25,
"global_step": 54,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.037383177570093455,
"grad_norm": 2.7531967163085938,
"learning_rate": 2.9999999999999997e-05,
"loss": 7.8351,
"step": 1
},
{
"epoch": 0.037383177570093455,
"eval_loss": 8.958544731140137,
"eval_runtime": 4.6786,
"eval_samples_per_second": 10.687,
"eval_steps_per_second": 1.496,
"step": 1
},
{
"epoch": 0.07476635514018691,
"grad_norm": 2.949021100997925,
"learning_rate": 5.9999999999999995e-05,
"loss": 8.5717,
"step": 2
},
{
"epoch": 0.11214953271028037,
"grad_norm": 3.286224842071533,
"learning_rate": 8.999999999999999e-05,
"loss": 8.8883,
"step": 3
},
{
"epoch": 0.14953271028037382,
"grad_norm": 3.830043315887451,
"learning_rate": 0.00011999999999999999,
"loss": 9.2578,
"step": 4
},
{
"epoch": 0.18691588785046728,
"grad_norm": 5.241765975952148,
"learning_rate": 0.00015,
"loss": 9.069,
"step": 5
},
{
"epoch": 0.22429906542056074,
"grad_norm": 10.764116287231445,
"learning_rate": 0.00017999999999999998,
"loss": 8.8101,
"step": 6
},
{
"epoch": 0.2616822429906542,
"grad_norm": 4.389330863952637,
"learning_rate": 0.00020999999999999998,
"loss": 6.1341,
"step": 7
},
{
"epoch": 0.29906542056074764,
"grad_norm": 6.786483287811279,
"learning_rate": 0.00023999999999999998,
"loss": 6.6059,
"step": 8
},
{
"epoch": 0.3364485981308411,
"grad_norm": 5.6405463218688965,
"learning_rate": 0.00027,
"loss": 5.9216,
"step": 9
},
{
"epoch": 0.37383177570093457,
"grad_norm": 9.129680633544922,
"learning_rate": 0.0003,
"loss": 5.3773,
"step": 10
},
{
"epoch": 0.411214953271028,
"grad_norm": 7.611507415771484,
"learning_rate": 0.000299617817191538,
"loss": 4.8489,
"step": 11
},
{
"epoch": 0.4485981308411215,
"grad_norm": 8.8467378616333,
"learning_rate": 0.0002984732162821399,
"loss": 4.6982,
"step": 12
},
{
"epoch": 0.48598130841121495,
"grad_norm": 5.18742036819458,
"learning_rate": 0.00029657202989567393,
"loss": 4.0034,
"step": 13
},
{
"epoch": 0.5233644859813084,
"grad_norm": 3.5742533206939697,
"learning_rate": 0.0002939239460421746,
"loss": 4.4227,
"step": 14
},
{
"epoch": 0.5607476635514018,
"grad_norm": 3.673884630203247,
"learning_rate": 0.00029054245874996426,
"loss": 3.9501,
"step": 15
},
{
"epoch": 0.5981308411214953,
"grad_norm": 3.613555908203125,
"learning_rate": 0.00028644479930317775,
"loss": 4.1168,
"step": 16
},
{
"epoch": 0.6355140186915887,
"grad_norm": 3.2746894359588623,
"learning_rate": 0.0002816518484350883,
"loss": 3.7159,
"step": 17
},
{
"epoch": 0.6728971962616822,
"grad_norm": 3.354125499725342,
"learning_rate": 0.0002761880299246772,
"loss": 3.7459,
"step": 18
},
{
"epoch": 0.7102803738317757,
"grad_norm": 3.6942992210388184,
"learning_rate": 0.00027008118613865406,
"loss": 3.6319,
"step": 19
},
{
"epoch": 0.7476635514018691,
"grad_norm": 3.4715960025787354,
"learning_rate": 0.00026336243615313873,
"loss": 3.7446,
"step": 20
},
{
"epoch": 0.7850467289719626,
"grad_norm": 2.8266215324401855,
"learning_rate": 0.00025606601717798207,
"loss": 3.3823,
"step": 21
},
{
"epoch": 0.822429906542056,
"grad_norm": 2.729022979736328,
"learning_rate": 0.00024822911009179276,
"loss": 3.5056,
"step": 22
},
{
"epoch": 0.8598130841121495,
"grad_norm": 3.182410955429077,
"learning_rate": 0.00023989164997670202,
"loss": 3.4934,
"step": 23
},
{
"epoch": 0.897196261682243,
"grad_norm": 4.04162073135376,
"learning_rate": 0.00023109612261833963,
"loss": 3.5696,
"step": 24
},
{
"epoch": 0.9345794392523364,
"grad_norm": 2.7601511478424072,
"learning_rate": 0.00022188734800800852,
"loss": 3.1508,
"step": 25
},
{
"epoch": 0.9345794392523364,
"eval_loss": 3.467716693878174,
"eval_runtime": 4.2146,
"eval_samples_per_second": 11.864,
"eval_steps_per_second": 1.661,
"step": 25
},
{
"epoch": 0.9719626168224299,
"grad_norm": 3.114112615585327,
"learning_rate": 0.00021231225195028297,
"loss": 3.4493,
"step": 26
},
{
"epoch": 1.0186915887850467,
"grad_norm": 4.9350666999816895,
"learning_rate": 0.00020241962693986476,
"loss": 5.0985,
"step": 27
},
{
"epoch": 1.0560747663551402,
"grad_norm": 2.5191762447357178,
"learning_rate": 0.00019225988352621445,
"loss": 2.8999,
"step": 28
},
{
"epoch": 1.0934579439252337,
"grad_norm": 3.0265848636627197,
"learning_rate": 0.00018188479343294648,
"loss": 3.0403,
"step": 29
},
{
"epoch": 1.1308411214953271,
"grad_norm": 3.448732376098633,
"learning_rate": 0.00017134722574099276,
"loss": 3.3247,
"step": 30
},
{
"epoch": 1.1682242990654206,
"grad_norm": 2.8440847396850586,
"learning_rate": 0.00016070087747988482,
"loss": 2.8432,
"step": 31
},
{
"epoch": 1.205607476635514,
"grad_norm": 3.15624737739563,
"learning_rate": 0.00015,
"loss": 2.8891,
"step": 32
},
{
"epoch": 1.2429906542056075,
"grad_norm": 2.830214262008667,
"learning_rate": 0.00013929912252011516,
"loss": 2.8265,
"step": 33
},
{
"epoch": 1.280373831775701,
"grad_norm": 2.4644389152526855,
"learning_rate": 0.00012865277425900724,
"loss": 2.4983,
"step": 34
},
{
"epoch": 1.3177570093457944,
"grad_norm": 3.523728370666504,
"learning_rate": 0.00011811520656705348,
"loss": 2.8003,
"step": 35
},
{
"epoch": 1.355140186915888,
"grad_norm": 3.176539897918701,
"learning_rate": 0.00010774011647378553,
"loss": 2.7638,
"step": 36
},
{
"epoch": 1.3925233644859814,
"grad_norm": 3.2825207710266113,
"learning_rate": 9.758037306013526e-05,
"loss": 2.7658,
"step": 37
},
{
"epoch": 1.4299065420560748,
"grad_norm": 3.181520938873291,
"learning_rate": 8.768774804971705e-05,
"loss": 2.5484,
"step": 38
},
{
"epoch": 1.4672897196261683,
"grad_norm": 3.379570960998535,
"learning_rate": 7.811265199199152e-05,
"loss": 2.901,
"step": 39
},
{
"epoch": 1.5046728971962615,
"grad_norm": 3.2364346981048584,
"learning_rate": 6.890387738166041e-05,
"loss": 2.535,
"step": 40
},
{
"epoch": 1.542056074766355,
"grad_norm": 3.4163613319396973,
"learning_rate": 6.010835002329795e-05,
"loss": 2.567,
"step": 41
},
{
"epoch": 1.5794392523364484,
"grad_norm": 3.7154176235198975,
"learning_rate": 5.1770889908207245e-05,
"loss": 2.6049,
"step": 42
},
{
"epoch": 1.616822429906542,
"grad_norm": 3.4739344120025635,
"learning_rate": 4.3933982822017876e-05,
"loss": 2.8533,
"step": 43
},
{
"epoch": 1.6542056074766354,
"grad_norm": 3.2199292182922363,
"learning_rate": 3.663756384686127e-05,
"loss": 2.529,
"step": 44
},
{
"epoch": 1.6915887850467288,
"grad_norm": 3.004704236984253,
"learning_rate": 2.9918813861345952e-05,
"loss": 2.4814,
"step": 45
},
{
"epoch": 1.7289719626168223,
"grad_norm": 3.350970506668091,
"learning_rate": 2.38119700753228e-05,
"loss": 2.4404,
"step": 46
},
{
"epoch": 1.7663551401869158,
"grad_norm": 3.503912925720215,
"learning_rate": 1.834815156491165e-05,
"loss": 2.5487,
"step": 47
},
{
"epoch": 1.8037383177570092,
"grad_norm": 3.0378119945526123,
"learning_rate": 1.3555200696822232e-05,
"loss": 2.5837,
"step": 48
},
{
"epoch": 1.8411214953271027,
"grad_norm": 3.5352275371551514,
"learning_rate": 9.45754125003576e-06,
"loss": 2.6595,
"step": 49
},
{
"epoch": 1.8785046728971961,
"grad_norm": 3.9558587074279785,
"learning_rate": 6.076053957825411e-06,
"loss": 2.9114,
"step": 50
},
{
"epoch": 1.8785046728971961,
"eval_loss": 3.401686906814575,
"eval_runtime": 4.2148,
"eval_samples_per_second": 11.863,
"eval_steps_per_second": 1.661,
"step": 50
},
{
"epoch": 1.9158878504672896,
"grad_norm": 3.5044848918914795,
"learning_rate": 3.4279701043260886e-06,
"loss": 2.5647,
"step": 51
},
{
"epoch": 1.953271028037383,
"grad_norm": 3.4336962699890137,
"learning_rate": 1.5267837178600972e-06,
"loss": 2.6922,
"step": 52
},
{
"epoch": 1.9906542056074765,
"grad_norm": 5.213560104370117,
"learning_rate": 3.821828084619727e-07,
"loss": 3.6647,
"step": 53
},
{
"epoch": 2.0373831775700935,
"grad_norm": 2.9050323963165283,
"learning_rate": 0.0,
"loss": 2.2642,
"step": 54
}
],
"logging_steps": 1,
"max_steps": 54,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.51315657885614e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
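
For reference, this is the trainer_state.json that Hugging Face Transformers' Trainer writes alongside each checkpoint. Below is a minimal sketch of reading the logged losses back out of it; it assumes the file above has been downloaded locally as "trainer_state.json" (the path is an assumption, not part of the checkpoint itself).

import json

# Minimal sketch: parse the checkpoint's trainer state and pull the per-step
# training loss and the periodic eval loss out of log_history.
# Assumes the JSON above was saved locally as "trainer_state.json".
with open("trainer_state.json") as f:
    state = json.load(f)

train_log = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_log = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"best eval loss {state['best_metric']:.4f} at {state['best_model_checkpoint']}")
for step, loss in eval_log:
    print(f"step {step:>2}: eval_loss = {loss:.4f}")

With the values above this prints eval losses of roughly 8.96, 3.47, and 3.40 at steps 1, 25, and 50, matching best_metric and the EarlyStoppingCallback state (patience counter 0; training stopped because global_step reached max_steps = 54).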