MHGanainy/gpt2-xl-lora-ecthr-random-imbalanced-skewed-cluster-8-id-0-blah
52ab55c verified
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 1336,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0748502994011976,
      "grad_norm": 0.16372738778591156,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 2.4252,
      "step": 100
    },
    {
      "epoch": 0.1497005988023952,
      "grad_norm": 0.27203845977783203,
      "learning_rate": 4.000000000000001e-06,
      "loss": 2.4077,
      "step": 200
    },
    {
      "epoch": 0.2245508982035928,
      "grad_norm": 0.27170997858047485,
      "learning_rate": 6e-06,
      "loss": 2.3881,
      "step": 300
    },
    {
      "epoch": 0.2994011976047904,
      "grad_norm": 0.383687824010849,
      "learning_rate": 8.000000000000001e-06,
      "loss": 2.3753,
      "step": 400
    },
    {
      "epoch": 0.37425149700598803,
      "grad_norm": 0.4785425662994385,
      "learning_rate": 1e-05,
      "loss": 2.2923,
      "step": 500
    },
    {
      "epoch": 0.4491017964071856,
      "grad_norm": 0.5393449664115906,
      "learning_rate": 1.2e-05,
      "loss": 2.2659,
      "step": 600
    },
    {
      "epoch": 0.5239520958083832,
      "grad_norm": 0.538110077381134,
      "learning_rate": 1.4e-05,
      "loss": 2.2294,
      "step": 700
    },
    {
      "epoch": 0.5988023952095808,
      "grad_norm": 0.7190410494804382,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 2.1935,
      "step": 800
    },
    {
      "epoch": 0.6736526946107785,
      "grad_norm": 0.8610377311706543,
      "learning_rate": 1.8e-05,
      "loss": 2.1684,
      "step": 900
    },
    {
      "epoch": 0.7485029940119761,
      "grad_norm": 1.113236665725708,
      "learning_rate": 2e-05,
      "loss": 2.178,
      "step": 1000
    },
    {
      "epoch": 0.8233532934131736,
      "grad_norm": 0.762846827507019,
      "learning_rate": 1.5938201855735017e-05,
      "loss": 2.1273,
      "step": 1100
    },
    {
      "epoch": 0.8982035928143712,
      "grad_norm": 0.8103228211402893,
      "learning_rate": 7.052448255890958e-06,
      "loss": 2.148,
      "step": 1200
    },
    {
      "epoch": 0.9730538922155688,
      "grad_norm": 0.8717363476753235,
      "learning_rate": 5.611666969163243e-07,
      "loss": 2.0705,
      "step": 1300
    },
    {
      "epoch": 1.0,
      "step": 1336,
      "total_flos": 2.4464433119232e+16,
      "train_loss": 2.247535785515151,
      "train_runtime": 357.9632,
      "train_samples_per_second": 7.462,
      "train_steps_per_second": 3.732
    }
  ],
  "logging_steps": 100,
  "max_steps": 1336,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.4464433119232e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
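
A minimal sketch of one way to inspect this state file, assuming it has been downloaded locally as trainer_state.json and matplotlib is installed (the filename and plotting choices are illustrative, not part of the repository): it reads log_history, keeps the per-step entries that carry a loss value, and plots the logged training loss and learning-rate schedule.

import json

import matplotlib.pyplot as plt

# Assumed local path to the file shown above.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step log entries; the final entry holds run-level
# summaries (train_runtime, total_flos, ...) rather than a loss value.
logs = [entry for entry in state["log_history"] if "loss" in entry]

steps = [entry["step"] for entry in logs]
losses = [entry["loss"] for entry in logs]
lrs = [entry["learning_rate"] for entry in logs]

fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, losses)
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_xlabel("step")
ax_lr.set_ylabel("learning rate")
fig.tight_layout()
plt.show()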