{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 100000000,
"global_step": 100000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 1.5673162937164307,
"learning_rate": 9.9003e-06,
"loss": 1.0389,
"step": 1000
},
{
"epoch": 0.02,
"grad_norm": 2.175995111465454,
"learning_rate": 9.8003e-06,
"loss": 1.0254,
"step": 2000
},
{
"epoch": 0.03,
"grad_norm": 1.515001654624939,
"learning_rate": 9.700300000000001e-06,
"loss": 1.0222,
"step": 3000
},
{
"epoch": 0.04,
"grad_norm": 1.4583686590194702,
"learning_rate": 9.600300000000002e-06,
"loss": 1.0159,
"step": 4000
},
{
"epoch": 0.05,
"grad_norm": 1.7361509799957275,
"learning_rate": 9.500300000000001e-06,
"loss": 1.0143,
"step": 5000
},
{
"epoch": 0.06,
"grad_norm": 1.4579741954803467,
"learning_rate": 9.4003e-06,
"loss": 1.0143,
"step": 6000
},
{
"epoch": 0.07,
"grad_norm": 1.4651838541030884,
"learning_rate": 9.300300000000001e-06,
"loss": 1.016,
"step": 7000
},
{
"epoch": 0.08,
"grad_norm": 1.4496525526046753,
"learning_rate": 9.2003e-06,
"loss": 1.0077,
"step": 8000
},
{
"epoch": 0.09,
"grad_norm": 1.500932216644287,
"learning_rate": 9.100300000000002e-06,
"loss": 1.0102,
"step": 9000
},
{
"epoch": 0.1,
"grad_norm": 1.572792887687683,
"learning_rate": 9.000300000000001e-06,
"loss": 1.014,
"step": 10000
},
{
"epoch": 0.11,
"grad_norm": 1.4459178447723389,
"learning_rate": 8.900400000000002e-06,
"loss": 1.0144,
"step": 11000
},
{
"epoch": 0.12,
"grad_norm": 1.6553645133972168,
"learning_rate": 8.800500000000002e-06,
"loss": 1.0085,
"step": 12000
},
{
"epoch": 0.13,
"grad_norm": 1.517026424407959,
"learning_rate": 8.7005e-06,
"loss": 1.0113,
"step": 13000
},
{
"epoch": 0.14,
"grad_norm": 1.547823429107666,
"learning_rate": 8.6005e-06,
"loss": 1.0142,
"step": 14000
},
{
"epoch": 0.15,
"grad_norm": 1.4814399480819702,
"learning_rate": 8.500500000000001e-06,
"loss": 1.0033,
"step": 15000
},
{
"epoch": 0.16,
"grad_norm": 1.7205946445465088,
"learning_rate": 8.4005e-06,
"loss": 1.0132,
"step": 16000
},
{
"epoch": 0.17,
"grad_norm": 1.655186414718628,
"learning_rate": 8.300600000000001e-06,
"loss": 1.0091,
"step": 17000
},
{
"epoch": 0.18,
"grad_norm": 1.5006760358810425,
"learning_rate": 8.200700000000001e-06,
"loss": 1.0109,
"step": 18000
},
{
"epoch": 0.19,
"grad_norm": 1.5290812253952026,
"learning_rate": 8.1007e-06,
"loss": 1.0114,
"step": 19000
},
{
"epoch": 0.2,
"grad_norm": 1.4716876745224,
"learning_rate": 8.0007e-06,
"loss": 1.0117,
"step": 20000
},
{
"epoch": 0.21,
"grad_norm": 2.0684456825256348,
"learning_rate": 7.9007e-06,
"loss": 1.0063,
"step": 21000
},
{
"epoch": 0.22,
"grad_norm": 1.4653006792068481,
"learning_rate": 7.8008e-06,
"loss": 1.0106,
"step": 22000
},
{
"epoch": 0.23,
"grad_norm": 1.6020914316177368,
"learning_rate": 7.700800000000001e-06,
"loss": 1.0115,
"step": 23000
},
{
"epoch": 0.24,
"grad_norm": 1.48885977268219,
"learning_rate": 7.6008e-06,
"loss": 1.0179,
"step": 24000
},
{
"epoch": 0.25,
"grad_norm": 1.6026817560195923,
"learning_rate": 7.5008e-06,
"loss": 1.0098,
"step": 25000
},
{
"epoch": 0.26,
"grad_norm": 1.8403682708740234,
"learning_rate": 7.400900000000001e-06,
"loss": 1.0115,
"step": 26000
},
{
"epoch": 0.27,
"grad_norm": 1.908618450164795,
"learning_rate": 7.300900000000001e-06,
"loss": 1.0078,
"step": 27000
},
{
"epoch": 0.28,
"grad_norm": 1.61348557472229,
"learning_rate": 7.201e-06,
"loss": 1.014,
"step": 28000
},
{
"epoch": 0.29,
"grad_norm": 1.4422307014465332,
"learning_rate": 7.101e-06,
"loss": 1.0045,
"step": 29000
},
{
"epoch": 0.3,
"grad_norm": 1.5274263620376587,
"learning_rate": 7.001e-06,
"loss": 1.007,
"step": 30000
},
{
"epoch": 0.31,
"grad_norm": 1.6283539533615112,
"learning_rate": 6.9012e-06,
"loss": 1.0098,
"step": 31000
},
{
"epoch": 0.32,
"grad_norm": 1.4117109775543213,
"learning_rate": 6.8012e-06,
"loss": 1.0137,
"step": 32000
},
{
"epoch": 0.33,
"grad_norm": 1.6345027685165405,
"learning_rate": 6.701200000000001e-06,
"loss": 1.0195,
"step": 33000
},
{
"epoch": 0.34,
"grad_norm": 1.5019372701644897,
"learning_rate": 6.601300000000001e-06,
"loss": 1.0109,
"step": 34000
},
{
"epoch": 0.35,
"grad_norm": 1.5113171339035034,
"learning_rate": 6.501300000000001e-06,
"loss": 1.0123,
"step": 35000
},
{
"epoch": 0.36,
"grad_norm": 1.7723476886749268,
"learning_rate": 6.401400000000001e-06,
"loss": 1.0123,
"step": 36000
},
{
"epoch": 0.37,
"grad_norm": 1.691326379776001,
"learning_rate": 6.301400000000001e-06,
"loss": 1.0168,
"step": 37000
},
{
"epoch": 0.38,
"grad_norm": 1.8816444873809814,
"learning_rate": 6.2014000000000005e-06,
"loss": 1.0122,
"step": 38000
},
{
"epoch": 0.39,
"grad_norm": 1.472257137298584,
"learning_rate": 6.1014000000000006e-06,
"loss": 1.0154,
"step": 39000
},
{
"epoch": 0.4,
"grad_norm": 1.6379979848861694,
"learning_rate": 6.0015e-06,
"loss": 1.0083,
"step": 40000
},
{
"epoch": 0.41,
"grad_norm": 1.7332875728607178,
"learning_rate": 5.9015e-06,
"loss": 1.0094,
"step": 41000
},
{
"epoch": 0.42,
"grad_norm": 1.7920299768447876,
"learning_rate": 5.8016000000000005e-06,
"loss": 1.0223,
"step": 42000
},
{
"epoch": 0.43,
"grad_norm": 1.6276768445968628,
"learning_rate": 5.701600000000001e-06,
"loss": 1.0145,
"step": 43000
},
{
"epoch": 0.44,
"grad_norm": 1.84978187084198,
"learning_rate": 5.601600000000001e-06,
"loss": 1.0138,
"step": 44000
},
{
"epoch": 0.45,
"grad_norm": 1.6746885776519775,
"learning_rate": 5.5016e-06,
"loss": 1.0137,
"step": 45000
},
{
"epoch": 0.46,
"grad_norm": 7.81512451171875,
"learning_rate": 5.4018000000000005e-06,
"loss": 1.0229,
"step": 46000
},
{
"epoch": 0.47,
"grad_norm": 1.868177890777588,
"learning_rate": 5.301800000000001e-06,
"loss": 1.0218,
"step": 47000
},
{
"epoch": 0.48,
"grad_norm": 2.3036234378814697,
"learning_rate": 5.2018e-06,
"loss": 1.0237,
"step": 48000
},
{
"epoch": 0.49,
"grad_norm": 1.7692339420318604,
"learning_rate": 5.1020000000000004e-06,
"loss": 1.0198,
"step": 49000
},
{
"epoch": 0.5,
"grad_norm": 1.7446075677871704,
"learning_rate": 5.0020000000000006e-06,
"loss": 1.0207,
"step": 50000
},
{
"epoch": 0.51,
"grad_norm": 1.5397661924362183,
"learning_rate": 4.902000000000001e-06,
"loss": 1.0189,
"step": 51000
},
{
"epoch": 0.52,
"grad_norm": 1.762879490852356,
"learning_rate": 4.802000000000001e-06,
"loss": 1.0174,
"step": 52000
},
{
"epoch": 0.53,
"grad_norm": 1.9986176490783691,
"learning_rate": 4.702e-06,
"loss": 1.0181,
"step": 53000
},
{
"epoch": 0.54,
"grad_norm": 1.617098331451416,
"learning_rate": 4.6021e-06,
"loss": 1.0043,
"step": 54000
},
{
"epoch": 0.55,
"grad_norm": 2.0762548446655273,
"learning_rate": 4.5021000000000005e-06,
"loss": 1.0117,
"step": 55000
},
{
"epoch": 0.56,
"grad_norm": 1.9250789880752563,
"learning_rate": 4.4022e-06,
"loss": 1.0128,
"step": 56000
},
{
"epoch": 0.57,
"grad_norm": 1.956571340560913,
"learning_rate": 4.3022e-06,
"loss": 1.0134,
"step": 57000
},
{
"epoch": 0.58,
"grad_norm": 1.921067237854004,
"learning_rate": 4.2022e-06,
"loss": 1.0135,
"step": 58000
},
{
"epoch": 0.59,
"grad_norm": 1.667677640914917,
"learning_rate": 4.1022e-06,
"loss": 1.0211,
"step": 59000
},
{
"epoch": 0.6,
"grad_norm": 1.9290637969970703,
"learning_rate": 4.0024e-06,
"loss": 1.0073,
"step": 60000
},
{
"epoch": 0.61,
"grad_norm": 1.764079213142395,
"learning_rate": 3.9024e-06,
"loss": 1.0151,
"step": 61000
},
{
"epoch": 0.62,
"grad_norm": 1.786582350730896,
"learning_rate": 3.8024000000000006e-06,
"loss": 1.0196,
"step": 62000
},
{
"epoch": 0.63,
"grad_norm": 1.597281813621521,
"learning_rate": 3.7025000000000005e-06,
"loss": 1.0117,
"step": 63000
},
{
"epoch": 0.64,
"grad_norm": 1.5449904203414917,
"learning_rate": 3.6025000000000002e-06,
"loss": 1.0063,
"step": 64000
},
{
"epoch": 0.65,
"grad_norm": 1.5187263488769531,
"learning_rate": 3.5026000000000006e-06,
"loss": 1.0087,
"step": 65000
},
{
"epoch": 0.66,
"grad_norm": 2.036907434463501,
"learning_rate": 3.4026000000000003e-06,
"loss": 1.0115,
"step": 66000
},
{
"epoch": 0.67,
"grad_norm": 1.8339091539382935,
"learning_rate": 3.3026000000000004e-06,
"loss": 1.0055,
"step": 67000
},
{
"epoch": 0.68,
"grad_norm": 1.6574101448059082,
"learning_rate": 3.2027000000000003e-06,
"loss": 1.0078,
"step": 68000
},
{
"epoch": 0.69,
"grad_norm": 1.7668465375900269,
"learning_rate": 3.1028e-06,
"loss": 1.0012,
"step": 69000
},
{
"epoch": 0.7,
"grad_norm": 1.5608104467391968,
"learning_rate": 3.0028000000000003e-06,
"loss": 1.0063,
"step": 70000
},
{
"epoch": 0.71,
"grad_norm": 1.808834433555603,
"learning_rate": 2.9028e-06,
"loss": 1.0063,
"step": 71000
},
{
"epoch": 0.72,
"grad_norm": 1.8370723724365234,
"learning_rate": 2.8029e-06,
"loss": 1.0093,
"step": 72000
},
{
"epoch": 0.73,
"grad_norm": 1.6162232160568237,
"learning_rate": 2.7029e-06,
"loss": 1.0092,
"step": 73000
},
{
"epoch": 0.74,
"grad_norm": 1.728576421737671,
"learning_rate": 2.6029000000000005e-06,
"loss": 0.9978,
"step": 74000
},
{
"epoch": 0.75,
"grad_norm": 1.6973289251327515,
"learning_rate": 2.5029e-06,
"loss": 1.0086,
"step": 75000
},
{
"epoch": 0.76,
"grad_norm": 2.00152850151062,
"learning_rate": 2.4029000000000003e-06,
"loss": 1.0048,
"step": 76000
},
{
"epoch": 0.77,
"grad_norm": 2.0380213260650635,
"learning_rate": 2.3029e-06,
"loss": 1.0067,
"step": 77000
},
{
"epoch": 0.78,
"grad_norm": 1.9548099040985107,
"learning_rate": 2.2031e-06,
"loss": 1.0196,
"step": 78000
},
{
"epoch": 0.79,
"grad_norm": 1.6247434616088867,
"learning_rate": 2.1031000000000002e-06,
"loss": 1.0067,
"step": 79000
},
{
"epoch": 0.8,
"grad_norm": 1.6271846294403076,
"learning_rate": 2.0031e-06,
"loss": 1.0075,
"step": 80000
},
{
"epoch": 0.81,
"grad_norm": 1.7199640274047852,
"learning_rate": 1.9031000000000003e-06,
"loss": 0.9991,
"step": 81000
},
{
"epoch": 0.82,
"grad_norm": 1.5637553930282593,
"learning_rate": 1.8033e-06,
"loss": 1.0045,
"step": 82000
},
{
"epoch": 0.83,
"grad_norm": 1.5427855253219604,
"learning_rate": 1.7033000000000003e-06,
"loss": 1.0053,
"step": 83000
},
{
"epoch": 0.84,
"grad_norm": 2.7503976821899414,
"learning_rate": 1.6033000000000002e-06,
"loss": 1.0032,
"step": 84000
},
{
"epoch": 0.85,
"grad_norm": 1.8314589262008667,
"learning_rate": 1.5033e-06,
"loss": 1.0065,
"step": 85000
},
{
"epoch": 0.86,
"grad_norm": 1.925482988357544,
"learning_rate": 1.4033000000000002e-06,
"loss": 1.0065,
"step": 86000
},
{
"epoch": 0.87,
"grad_norm": 1.8822989463806152,
"learning_rate": 1.3033e-06,
"loss": 1.0104,
"step": 87000
},
{
"epoch": 0.88,
"grad_norm": 1.5026013851165771,
"learning_rate": 1.2035e-06,
"loss": 1.0055,
"step": 88000
},
{
"epoch": 0.89,
"grad_norm": 1.8803695440292358,
"learning_rate": 1.1035000000000001e-06,
"loss": 1.0017,
"step": 89000
},
{
"epoch": 0.9,
"grad_norm": 1.9868890047073364,
"learning_rate": 1.0035e-06,
"loss": 0.9994,
"step": 90000
},
{
"epoch": 0.91,
"grad_norm": 1.9017215967178345,
"learning_rate": 9.035000000000001e-07,
"loss": 0.9996,
"step": 91000
},
{
"epoch": 0.92,
"grad_norm": 1.4807873964309692,
"learning_rate": 8.036e-07,
"loss": 1.0046,
"step": 92000
},
{
"epoch": 0.93,
"grad_norm": 1.6405707597732544,
"learning_rate": 7.036000000000001e-07,
"loss": 1.0001,
"step": 93000
},
{
"epoch": 0.94,
"grad_norm": 1.6944319009780884,
"learning_rate": 6.036e-07,
"loss": 1.0017,
"step": 94000
},
{
"epoch": 0.95,
"grad_norm": 1.8059883117675781,
"learning_rate": 5.037e-07,
"loss": 1.009,
"step": 95000
},
{
"epoch": 0.96,
"grad_norm": 1.6612234115600586,
"learning_rate": 4.038e-07,
"loss": 1.0013,
"step": 96000
},
{
"epoch": 0.97,
"grad_norm": 1.8323150873184204,
"learning_rate": 3.038e-07,
"loss": 1.0077,
"step": 97000
},
{
"epoch": 0.98,
"grad_norm": 1.857452630996704,
"learning_rate": 2.038e-07,
"loss": 0.9959,
"step": 98000
},
{
"epoch": 0.99,
"grad_norm": 1.6731070280075073,
"learning_rate": 1.0380000000000002e-07,
"loss": 1.0085,
"step": 99000
},
{
"epoch": 1.0,
"grad_norm": 1.7343461513519287,
"learning_rate": 3.9e-09,
"loss": 0.9988,
"step": 100000
},
{
"epoch": 1.0,
"step": 100000,
"total_flos": 1.0909051256832e+18,
"train_loss": 1.0107983074951172,
"train_runtime": 153234.8237,
"train_samples_per_second": 1.305,
"train_steps_per_second": 0.653
}
],
"logging_steps": 1000,
"max_steps": 100000,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 20000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.0909051256832e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}