{ "best_metric": 1.0723166465759277, "best_model_checkpoint": "miner_id_24/checkpoint-500", "epoch": 0.34916201117318435, "eval_steps": 50, "global_step": 500, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0006983240223463687, "eval_loss": 1.370945930480957, "eval_runtime": 52.0353, "eval_samples_per_second": 11.588, "eval_steps_per_second": 2.902, "step": 1 }, { "epoch": 0.006983240223463687, "grad_norm": 0.7985172271728516, "learning_rate": 4.2000000000000004e-05, "loss": 1.0747, "step": 10 }, { "epoch": 0.013966480446927373, "grad_norm": 0.5064533352851868, "learning_rate": 8.400000000000001e-05, "loss": 0.9049, "step": 20 }, { "epoch": 0.02094972067039106, "grad_norm": 0.6195475459098816, "learning_rate": 0.000126, "loss": 0.9139, "step": 30 }, { "epoch": 0.027932960893854747, "grad_norm": 0.5974525213241577, "learning_rate": 0.00016800000000000002, "loss": 0.8777, "step": 40 }, { "epoch": 0.034916201117318434, "grad_norm": 1.1508451700210571, "learning_rate": 0.00021, "loss": 2.2731, "step": 50 }, { "epoch": 0.034916201117318434, "eval_loss": 1.2975257635116577, "eval_runtime": 52.0378, "eval_samples_per_second": 11.588, "eval_steps_per_second": 2.902, "step": 50 }, { "epoch": 0.04189944134078212, "grad_norm": 0.47768744826316833, "learning_rate": 0.00020974422527728155, "loss": 0.9301, "step": 60 }, { "epoch": 0.04888268156424581, "grad_norm": 0.4488859176635742, "learning_rate": 0.0002089781472178649, "loss": 0.8632, "step": 70 }, { "epoch": 0.055865921787709494, "grad_norm": 0.4876834750175476, "learning_rate": 0.0002077054980770496, "loss": 0.9053, "step": 80 }, { "epoch": 0.06284916201117319, "grad_norm": 0.6040335893630981, "learning_rate": 0.00020593247807352348, "loss": 0.9912, "step": 90 }, { "epoch": 0.06983240223463687, "grad_norm": 1.2908031940460205, "learning_rate": 0.00020366772518252038, "loss": 2.3175, "step": 100 }, { "epoch": 0.06983240223463687, "eval_loss": 1.2717502117156982, "eval_runtime": 52.2379, "eval_samples_per_second": 11.543, "eval_steps_per_second": 2.891, "step": 100 }, { "epoch": 0.07681564245810056, "grad_norm": 0.556641697883606, "learning_rate": 0.0002009222730524731, "loss": 0.9617, "step": 110 }, { "epoch": 0.08379888268156424, "grad_norm": 0.45357635617256165, "learning_rate": 0.00019770949725018733, "loss": 0.9447, "step": 120 }, { "epoch": 0.09078212290502793, "grad_norm": 0.530303418636322, "learning_rate": 0.00019404505009642473, "loss": 0.8661, "step": 130 }, { "epoch": 0.09776536312849161, "grad_norm": 0.544837474822998, "learning_rate": 0.0001899467844093695, "loss": 0.8477, "step": 140 }, { "epoch": 0.10474860335195531, "grad_norm": 1.3659236431121826, "learning_rate": 0.00018543466652749268, "loss": 2.1598, "step": 150 }, { "epoch": 0.10474860335195531, "eval_loss": 1.2168477773666382, "eval_runtime": 52.7182, "eval_samples_per_second": 11.438, "eval_steps_per_second": 2.864, "step": 150 }, { "epoch": 0.11173184357541899, "grad_norm": 0.4937569499015808, "learning_rate": 0.00018053067903555837, "loss": 0.9268, "step": 160 }, { "epoch": 0.11871508379888268, "grad_norm": 0.45097047090530396, "learning_rate": 0.00017525871366768012, "loss": 0.8534, "step": 170 }, { "epoch": 0.12569832402234637, "grad_norm": 0.442699670791626, "learning_rate": 0.00016964445490919413, "loss": 0.8584, "step": 180 }, { "epoch": 0.13268156424581007, "grad_norm": 0.49953675270080566, "learning_rate": 0.00016371525486442843, "loss": 0.7786, "step": 190 }, { "epoch": 0.13966480446927373, 
"grad_norm": 1.2703161239624023, "learning_rate": 0.0001575, "loss": 1.9828, "step": 200 }, { "epoch": 0.13966480446927373, "eval_loss": 1.1949564218521118, "eval_runtime": 51.9478, "eval_samples_per_second": 11.608, "eval_steps_per_second": 2.907, "step": 200 }, { "epoch": 0.14664804469273743, "grad_norm": 0.4466894268989563, "learning_rate": 0.00015102897041285315, "loss": 0.9098, "step": 210 }, { "epoch": 0.15363128491620112, "grad_norm": 0.4285886287689209, "learning_rate": 0.00014433369230867077, "loss": 0.9563, "step": 220 }, { "epoch": 0.16061452513966482, "grad_norm": 0.4518221616744995, "learning_rate": 0.0001374467844093695, "loss": 0.748, "step": 230 }, { "epoch": 0.16759776536312848, "grad_norm": 0.6178202033042908, "learning_rate": 0.0001304017990379651, "loss": 0.9594, "step": 240 }, { "epoch": 0.17458100558659218, "grad_norm": 1.0840479135513306, "learning_rate": 0.0001232330586550277, "loss": 2.2842, "step": 250 }, { "epoch": 0.17458100558659218, "eval_loss": 1.148054599761963, "eval_runtime": 52.0966, "eval_samples_per_second": 11.575, "eval_steps_per_second": 2.898, "step": 250 }, { "epoch": 0.18156424581005587, "grad_norm": 0.48482319712638855, "learning_rate": 0.00011597548864310363, "loss": 0.9017, "step": 260 }, { "epoch": 0.18854748603351956, "grad_norm": 0.42090746760368347, "learning_rate": 0.00010866444715376263, "loss": 0.8558, "step": 270 }, { "epoch": 0.19553072625698323, "grad_norm": 0.44340935349464417, "learning_rate": 0.00010133555284623744, "loss": 0.7718, "step": 280 }, { "epoch": 0.20251396648044692, "grad_norm": 0.5416050553321838, "learning_rate": 9.402451135689641e-05, "loss": 0.8102, "step": 290 }, { "epoch": 0.20949720670391062, "grad_norm": 1.8410241603851318, "learning_rate": 8.676694134497232e-05, "loss": 1.9591, "step": 300 }, { "epoch": 0.20949720670391062, "eval_loss": 1.1187446117401123, "eval_runtime": 51.9082, "eval_samples_per_second": 11.617, "eval_steps_per_second": 2.909, "step": 300 }, { "epoch": 0.2164804469273743, "grad_norm": 0.4224921762943268, "learning_rate": 7.95982009620349e-05, "loss": 0.9126, "step": 310 }, { "epoch": 0.22346368715083798, "grad_norm": 0.41170719265937805, "learning_rate": 7.255321559063053e-05, "loss": 0.819, "step": 320 }, { "epoch": 0.23044692737430167, "grad_norm": 0.4653985798358917, "learning_rate": 6.566630769132923e-05, "loss": 0.8104, "step": 330 }, { "epoch": 0.23743016759776536, "grad_norm": 0.4874189496040344, "learning_rate": 5.897102958714686e-05, "loss": 0.8025, "step": 340 }, { "epoch": 0.24441340782122906, "grad_norm": 1.3538293838500977, "learning_rate": 5.250000000000002e-05, "loss": 2.2914, "step": 350 }, { "epoch": 0.24441340782122906, "eval_loss": 1.0988595485687256, "eval_runtime": 53.0976, "eval_samples_per_second": 11.356, "eval_steps_per_second": 2.844, "step": 350 }, { "epoch": 0.25139664804469275, "grad_norm": 0.40342968702316284, "learning_rate": 4.62847451355716e-05, "loss": 0.846, "step": 360 }, { "epoch": 0.25837988826815644, "grad_norm": 0.3979036509990692, "learning_rate": 4.035554509080588e-05, "loss": 0.8506, "step": 370 }, { "epoch": 0.26536312849162014, "grad_norm": 0.46212857961654663, "learning_rate": 3.474128633231992e-05, "loss": 0.769, "step": 380 }, { "epoch": 0.2723463687150838, "grad_norm": 0.49502870440483093, "learning_rate": 2.946932096444165e-05, "loss": 0.9183, "step": 390 }, { "epoch": 0.27932960893854747, "grad_norm": 1.1559889316558838, "learning_rate": 2.456533347250732e-05, "loss": 2.3416, "step": 400 }, { "epoch": 0.27932960893854747, "eval_loss": 
1.0803192853927612, "eval_runtime": 51.8492, "eval_samples_per_second": 11.63, "eval_steps_per_second": 2.912, "step": 400 }, { "epoch": 0.28631284916201116, "grad_norm": 0.40684303641319275, "learning_rate": 2.005321559063053e-05, "loss": 0.8687, "step": 410 }, { "epoch": 0.29329608938547486, "grad_norm": 0.3868181109428406, "learning_rate": 1.5954949903575276e-05, "loss": 0.7972, "step": 420 }, { "epoch": 0.30027932960893855, "grad_norm": 0.415388286113739, "learning_rate": 1.2290502749812666e-05, "loss": 0.7463, "step": 430 }, { "epoch": 0.30726256983240224, "grad_norm": 0.5129038095474243, "learning_rate": 9.077726947526898e-06, "loss": 0.8726, "step": 440 }, { "epoch": 0.31424581005586594, "grad_norm": 1.1775633096694946, "learning_rate": 6.332274817479627e-06, "loss": 2.3677, "step": 450 }, { "epoch": 0.31424581005586594, "eval_loss": 1.0729554891586304, "eval_runtime": 51.7424, "eval_samples_per_second": 11.654, "eval_steps_per_second": 2.918, "step": 450 }, { "epoch": 0.32122905027932963, "grad_norm": 0.3964717388153076, "learning_rate": 4.067521926476516e-06, "loss": 0.8386, "step": 460 }, { "epoch": 0.32821229050279327, "grad_norm": 0.3847760558128357, "learning_rate": 2.294501922950403e-06, "loss": 0.7781, "step": 470 }, { "epoch": 0.33519553072625696, "grad_norm": 0.4073781669139862, "learning_rate": 1.021852782135112e-06, "loss": 0.752, "step": 480 }, { "epoch": 0.34217877094972066, "grad_norm": 0.47937291860580444, "learning_rate": 2.5577472271845927e-07, "loss": 0.8182, "step": 490 }, { "epoch": 0.34916201117318435, "grad_norm": 1.1718170642852783, "learning_rate": 0.0, "loss": 2.1401, "step": 500 }, { "epoch": 0.34916201117318435, "eval_loss": 1.0723166465759277, "eval_runtime": 52.1413, "eval_samples_per_second": 11.565, "eval_steps_per_second": 2.896, "step": 500 } ], "logging_steps": 10, "max_steps": 500, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 50, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 3, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.408558478917632e+17, "train_batch_size": 4, "trial_name": null, "trial_params": null }