{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.155226558995065,
"eval_steps": 500,
"global_step": 300,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07178106774338268,
"grad_norm": 0.46034833788871765,
"learning_rate": 2.380952380952381e-06,
"loss": 1.2735,
"step": 10
},
{
"epoch": 0.14356213548676536,
"grad_norm": 0.17795707285404205,
"learning_rate": 4.761904761904762e-06,
"loss": 0.9311,
"step": 20
},
{
"epoch": 0.21534320323014805,
"grad_norm": 0.07722567766904831,
"learning_rate": 7.1428571428571436e-06,
"loss": 0.7258,
"step": 30
},
{
"epoch": 0.2871242709735307,
"grad_norm": 0.058059412986040115,
"learning_rate": 9.523809523809525e-06,
"loss": 0.6562,
"step": 40
},
{
"epoch": 0.35890533871691344,
"grad_norm": 0.05688474327325821,
"learning_rate": 9.786666666666667e-06,
"loss": 0.6236,
"step": 50
},
{
"epoch": 0.4306864064602961,
"grad_norm": 0.0652860477566719,
"learning_rate": 9.52e-06,
"loss": 0.6011,
"step": 60
},
{
"epoch": 0.5024674742036788,
"grad_norm": 0.09465105831623077,
"learning_rate": 9.253333333333333e-06,
"loss": 0.5775,
"step": 70
},
{
"epoch": 0.5742485419470614,
"grad_norm": 0.16655535995960236,
"learning_rate": 8.986666666666666e-06,
"loss": 0.5588,
"step": 80
},
{
"epoch": 0.6460296096904441,
"grad_norm": 0.11725710332393646,
"learning_rate": 8.720000000000001e-06,
"loss": 0.537,
"step": 90
},
{
"epoch": 0.7178106774338269,
"grad_norm": 0.044058505445718765,
"learning_rate": 8.453333333333334e-06,
"loss": 0.5187,
"step": 100
},
{
"epoch": 0.7895917451772095,
"grad_norm": 0.022063592448830605,
"learning_rate": 8.186666666666667e-06,
"loss": 0.51,
"step": 110
},
{
"epoch": 0.8613728129205922,
"grad_norm": 0.021037070080637932,
"learning_rate": 7.92e-06,
"loss": 0.5043,
"step": 120
},
{
"epoch": 0.9331538806639749,
"grad_norm": 0.01985151134431362,
"learning_rate": 7.653333333333333e-06,
"loss": 0.4985,
"step": 130
},
{
"epoch": 1.0058322117541498,
"grad_norm": 0.04843816161155701,
"learning_rate": 7.386666666666667e-06,
"loss": 0.5345,
"step": 140
},
{
"epoch": 1.0776132794975326,
"grad_norm": 0.02092103101313114,
"learning_rate": 7.1200000000000004e-06,
"loss": 0.4855,
"step": 150
},
{
"epoch": 1.1493943472409152,
"grad_norm": 0.020286045968532562,
"learning_rate": 6.853333333333334e-06,
"loss": 0.4794,
"step": 160
},
{
"epoch": 1.221175414984298,
"grad_norm": 0.020133651793003082,
"learning_rate": 6.5866666666666666e-06,
"loss": 0.4779,
"step": 170
},
{
"epoch": 1.2929564827276807,
"grad_norm": 0.020597418770194054,
"learning_rate": 6.3200000000000005e-06,
"loss": 0.4738,
"step": 180
},
{
"epoch": 1.3647375504710633,
"grad_norm": 0.020543133839964867,
"learning_rate": 6.0533333333333335e-06,
"loss": 0.4713,
"step": 190
},
{
"epoch": 1.4365186182144458,
"grad_norm": 0.0203793253749609,
"learning_rate": 5.7866666666666674e-06,
"loss": 0.4665,
"step": 200
},
{
"epoch": 1.5082996859578286,
"grad_norm": 0.020146360620856285,
"learning_rate": 5.5200000000000005e-06,
"loss": 0.4627,
"step": 210
},
{
"epoch": 1.5800807537012114,
"grad_norm": 0.020714716985821724,
"learning_rate": 5.2533333333333336e-06,
"loss": 0.4609,
"step": 220
},
{
"epoch": 1.651861821444594,
"grad_norm": 0.020331306383013725,
"learning_rate": 4.986666666666667e-06,
"loss": 0.458,
"step": 230
},
{
"epoch": 1.7236428891879767,
"grad_norm": 0.019636554643511772,
"learning_rate": 4.7200000000000005e-06,
"loss": 0.4559,
"step": 240
},
{
"epoch": 1.7954239569313595,
"grad_norm": 0.020189929753541946,
"learning_rate": 4.453333333333334e-06,
"loss": 0.454,
"step": 250
},
{
"epoch": 1.867205024674742,
"grad_norm": 0.020626794546842575,
"learning_rate": 4.1866666666666675e-06,
"loss": 0.4507,
"step": 260
},
{
"epoch": 1.9389860924181246,
"grad_norm": 0.02100289985537529,
"learning_rate": 3.920000000000001e-06,
"loss": 0.4498,
"step": 270
},
{
"epoch": 2.0116644235082997,
"grad_norm": 0.020512910559773445,
"learning_rate": 3.6533333333333336e-06,
"loss": 0.4824,
"step": 280
},
{
"epoch": 2.083445491251682,
"grad_norm": 0.02125644125044346,
"learning_rate": 3.386666666666667e-06,
"loss": 0.4383,
"step": 290
},
{
"epoch": 2.155226558995065,
"grad_norm": 0.021306023001670837,
"learning_rate": 3.12e-06,
"loss": 0.4377,
"step": 300
}
],
"logging_steps": 10,
"max_steps": 417,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 8.035788892038758e+17,
"train_batch_size": 14,
"trial_name": null,
"trial_params": null
}