{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.976,
"eval_steps": 500,
"global_step": 186,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.16,
"grad_norm": 0.42903047800064087,
"learning_rate": 2.6315789473684212e-05,
"loss": 1.2337,
"step": 10
},
{
"epoch": 0.32,
"grad_norm": 0.4659309387207031,
"learning_rate": 4.999557652060729e-05,
"loss": 1.1471,
"step": 20
},
{
"epoch": 0.48,
"grad_norm": 0.730895459651947,
"learning_rate": 4.946665048328287e-05,
"loss": 1.0505,
"step": 30
},
{
"epoch": 0.64,
"grad_norm": 0.6177507638931274,
"learning_rate": 4.807442755497524e-05,
"loss": 1.054,
"step": 40
},
{
"epoch": 0.8,
"grad_norm": 1.0368133783340454,
"learning_rate": 4.586803181690609e-05,
"loss": 1.0488,
"step": 50
},
{
"epoch": 0.96,
"grad_norm": 0.5574620962142944,
"learning_rate": 4.292531514268008e-05,
"loss": 1.0591,
"step": 60
},
{
"epoch": 1.12,
"grad_norm": 0.6061655282974243,
"learning_rate": 3.9350110223152844e-05,
"loss": 0.8107,
"step": 70
},
{
"epoch": 1.28,
"grad_norm": 1.3221992254257202,
"learning_rate": 3.526856686758269e-05,
"loss": 0.7167,
"step": 80
},
{
"epoch": 1.44,
"grad_norm": 1.6188404560089111,
"learning_rate": 3.082470085335133e-05,
"loss": 0.6753,
"step": 90
},
{
"epoch": 1.6,
"grad_norm": 0.9393795728683472,
"learning_rate": 2.6175312381477442e-05,
"loss": 0.6811,
"step": 100
},
{
"epoch": 1.76,
"grad_norm": 0.7857360243797302,
"learning_rate": 2.148445343837755e-05,
"loss": 0.7164,
"step": 110
},
{
"epoch": 1.92,
"grad_norm": 0.43257200717926025,
"learning_rate": 1.69176392810087e-05,
"loss": 0.7362,
"step": 120
},
{
"epoch": 2.08,
"grad_norm": 0.616813600063324,
"learning_rate": 1.2636008291040618e-05,
"loss": 0.7095,
"step": 130
},
{
"epoch": 2.24,
"grad_norm": 1.0762410163879395,
"learning_rate": 8.790636265485334e-06,
"loss": 0.4147,
"step": 140
},
{
"epoch": 2.4,
"grad_norm": 0.7847151160240173,
"learning_rate": 5.51720576197794e-06,
"loss": 0.4341,
"step": 150
},
{
"epoch": 2.56,
"grad_norm": 0.9602726101875305,
"learning_rate": 2.931218588927315e-06,
"loss": 0.4104,
"step": 160
},
{
"epoch": 2.7199999999999998,
"grad_norm": 0.5409191846847534,
"learning_rate": 1.1239203660860648e-06,
"loss": 0.4513,
"step": 170
},
{
"epoch": 2.88,
"grad_norm": 0.8361196517944336,
"learning_rate": 1.5908095594207583e-07,
"loss": 0.3921,
"step": 180
}
],
"logging_steps": 10,
"max_steps": 186,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.6182242199207936e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}