{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.6004319654427646,
"eval_steps": 500,
"global_step": 139,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11,
"grad_norm": 0.675453782081604,
"learning_rate": 0.0002,
"loss": 2.08,
"step": 25
},
{
"epoch": 0.22,
"grad_norm": 0.3721127510070801,
"learning_rate": 0.0002,
"loss": 0.3546,
"step": 50
},
{
"epoch": 0.32,
"grad_norm": 0.161994069814682,
"learning_rate": 0.0002,
"loss": 0.2689,
"step": 75
},
{
"epoch": 0.43,
"grad_norm": 0.08174678683280945,
"learning_rate": 0.0002,
"loss": 0.1425,
"step": 100
},
{
"epoch": 0.54,
"grad_norm": 0.1015535444021225,
"learning_rate": 0.0002,
"loss": 0.2211,
"step": 125
},
{
"epoch": 0.6,
"step": 139,
"total_flos": 9030484680671232.0,
"train_loss": 0.566877440582934,
"train_runtime": 2411.1952,
"train_samples_per_second": 0.461,
"train_steps_per_second": 0.058
}
],
"logging_steps": 25,
"max_steps": 139,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"total_flos": 9030484680671232.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}