{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.998109640831758,
"global_step": 396,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 5e-06,
"loss": 1.6488,
"step": 10
},
{
"epoch": 0.05,
"learning_rate": 1e-05,
"loss": 1.6585,
"step": 20
},
{
"epoch": 0.08,
"learning_rate": 1.5e-05,
"loss": 1.6373,
"step": 30
},
{
"epoch": 0.1,
"learning_rate": 2e-05,
"loss": 1.5933,
"step": 40
},
{
"epoch": 0.13,
"learning_rate": 2.5e-05,
"loss": 1.4982,
"step": 50
},
{
"epoch": 0.15,
"learning_rate": 3e-05,
"loss": 1.3297,
"step": 60
},
{
"epoch": 0.18,
"learning_rate": 3.5e-05,
"loss": 1.1515,
"step": 70
},
{
"epoch": 0.2,
"learning_rate": 4e-05,
"loss": 0.9944,
"step": 80
},
{
"epoch": 0.23,
"learning_rate": 4.5e-05,
"loss": 0.8887,
"step": 90
},
{
"epoch": 0.25,
"learning_rate": 5e-05,
"loss": 0.8275,
"step": 100
},
{
"epoch": 0.28,
"learning_rate": 4.954044117647059e-05,
"loss": 0.8152,
"step": 110
},
{
"epoch": 0.3,
"learning_rate": 4.908088235294118e-05,
"loss": 0.7991,
"step": 120
},
{
"epoch": 0.33,
"learning_rate": 4.8621323529411765e-05,
"loss": 0.7932,
"step": 130
},
{
"epoch": 0.35,
"learning_rate": 4.816176470588236e-05,
"loss": 0.7928,
"step": 140
},
{
"epoch": 0.38,
"learning_rate": 4.7702205882352946e-05,
"loss": 0.7968,
"step": 150
},
{
"epoch": 0.4,
"learning_rate": 4.7242647058823534e-05,
"loss": 0.7744,
"step": 160
},
{
"epoch": 0.43,
"learning_rate": 4.678308823529412e-05,
"loss": 0.7791,
"step": 170
},
{
"epoch": 0.45,
"learning_rate": 4.632352941176471e-05,
"loss": 0.7736,
"step": 180
},
{
"epoch": 0.48,
"learning_rate": 4.5863970588235296e-05,
"loss": 0.766,
"step": 190
},
{
"epoch": 0.5,
"learning_rate": 4.5404411764705883e-05,
"loss": 0.7717,
"step": 200
},
{
"epoch": 0.53,
"learning_rate": 4.494485294117647e-05,
"loss": 0.7638,
"step": 210
},
{
"epoch": 0.55,
"learning_rate": 4.448529411764706e-05,
"loss": 0.768,
"step": 220
},
{
"epoch": 0.58,
"learning_rate": 4.4025735294117646e-05,
"loss": 0.7573,
"step": 230
},
{
"epoch": 0.6,
"learning_rate": 4.356617647058824e-05,
"loss": 0.7633,
"step": 240
},
{
"epoch": 0.63,
"learning_rate": 4.310661764705883e-05,
"loss": 0.7562,
"step": 250
},
{
"epoch": 0.66,
"learning_rate": 4.2647058823529415e-05,
"loss": 0.7576,
"step": 260
},
{
"epoch": 0.68,
"learning_rate": 4.21875e-05,
"loss": 0.7516,
"step": 270
},
{
"epoch": 0.71,
"learning_rate": 4.172794117647059e-05,
"loss": 0.7501,
"step": 280
},
{
"epoch": 0.73,
"learning_rate": 4.126838235294118e-05,
"loss": 0.7601,
"step": 290
},
{
"epoch": 0.76,
"learning_rate": 4.0808823529411765e-05,
"loss": 0.7502,
"step": 300
},
{
"epoch": 0.78,
"learning_rate": 4.034926470588236e-05,
"loss": 0.7583,
"step": 310
},
{
"epoch": 0.81,
"learning_rate": 3.9889705882352946e-05,
"loss": 0.756,
"step": 320
},
{
"epoch": 0.83,
"learning_rate": 3.943014705882353e-05,
"loss": 0.7511,
"step": 330
},
{
"epoch": 0.86,
"learning_rate": 3.897058823529412e-05,
"loss": 0.7434,
"step": 340
},
{
"epoch": 0.88,
"learning_rate": 3.851102941176471e-05,
"loss": 0.7586,
"step": 350
},
{
"epoch": 0.91,
"learning_rate": 3.8051470588235296e-05,
"loss": 0.7486,
"step": 360
},
{
"epoch": 0.93,
"learning_rate": 3.759191176470588e-05,
"loss": 0.7499,
"step": 370
},
{
"epoch": 0.96,
"learning_rate": 3.713235294117647e-05,
"loss": 0.7516,
"step": 380
},
{
"epoch": 0.98,
"learning_rate": 3.667279411764706e-05,
"loss": 0.7451,
"step": 390
}
],
"max_steps": 1188,
"num_train_epochs": 3,
"total_flos": 2.029497948313328e+19,
"trial_name": null,
"trial_params": null
}
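
The JSON above is a standard Hugging Face Trainer state saved after the KoAlpaca-65B-LoRA 1-epoch run: 396 of 1188 planned steps (one epoch of three), with loss and learning rate logged every 10 steps. As a minimal sketch, not part of the original file, the Python below loads the file and plots the two logged series. It assumes the file is saved locally as trainer_state.json and that matplotlib is installed; the warmup/decay parameters in expected_lr are inferred from the logged values themselves, not read from any published training config.

import json

import matplotlib.pyplot as plt

# Assumed local path; adjust if the file lives elsewhere.
with open("trainer_state.json") as f:
    state = json.load(f)

logs = state["log_history"]
steps = [e["step"] for e in logs]
losses = [e["loss"] for e in logs]
lrs = [e["learning_rate"] for e in logs]


def expected_lr(step, peak=5e-05, warmup=100, max_steps=1188):
    """Learning rate consistent with the logged values: linear warmup to
    `peak` over the first `warmup` steps, then linear decay toward 0 at
    `max_steps`. These numbers are inferred from the log, not from a config.
    """
    if step <= warmup:
        return peak * step / warmup
    return peak * (max_steps - step) / (max_steps - warmup)


# Sanity-check the inferred schedule against every logged entry.
for e in logs:
    assert abs(e["learning_rate"] - expected_lr(e["step"])) < 1e-10

# Plot training loss and learning rate against the optimizer step.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(steps, losses)
ax1.set(xlabel="step", ylabel="training loss")
ax2.plot(steps, lrs)
ax2.set(xlabel="step", ylabel="learning rate")
fig.tight_layout()
plt.show()

Under that inferred schedule, the loss curve behaves as expected: it falls steeply while the learning rate warms up (1.65 to 0.83 over the first 100 steps), then flattens to roughly 0.75 for the remainder of the epoch as the rate decays.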