{
  "best_metric": 0.9428571428571428,
  "best_model_checkpoint": "videomae-base-finetuned-ucf101-subset/checkpoint-148",
  "epoch": 7.101351351351352,
  "eval_steps": 500,
  "global_step": 148,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06756756756756757,
      "grad_norm": 5.423322677612305,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 2.3498,
      "step": 10
    },
    {
      "epoch": 0.12837837837837837,
      "eval_accuracy": 0.21428571428571427,
      "eval_loss": 2.089395046234131,
      "eval_runtime": 4.4737,
      "eval_samples_per_second": 15.647,
      "eval_steps_per_second": 1.118,
      "step": 19
    },
    {
      "epoch": 1.0067567567567568,
      "grad_norm": 6.005563259124756,
      "learning_rate": 4.81203007518797e-05,
      "loss": 2.234,
      "step": 20
    },
    {
      "epoch": 1.0743243243243243,
      "grad_norm": 5.659995079040527,
      "learning_rate": 4.43609022556391e-05,
      "loss": 1.9304,
      "step": 30
    },
    {
      "epoch": 1.1283783783783783,
      "eval_accuracy": 0.4857142857142857,
      "eval_loss": 1.5130811929702759,
      "eval_runtime": 4.3325,
      "eval_samples_per_second": 16.157,
      "eval_steps_per_second": 1.154,
      "step": 38
    },
    {
      "epoch": 2.0135135135135136,
      "grad_norm": 6.728376865386963,
      "learning_rate": 4.0601503759398494e-05,
      "loss": 1.4823,
      "step": 40
    },
    {
      "epoch": 2.081081081081081,
      "grad_norm": 7.871072292327881,
      "learning_rate": 3.6842105263157895e-05,
      "loss": 1.099,
      "step": 50
    },
    {
      "epoch": 2.1283783783783785,
      "eval_accuracy": 0.6857142857142857,
      "eval_loss": 0.8322275280952454,
      "eval_runtime": 4.3072,
      "eval_samples_per_second": 16.252,
      "eval_steps_per_second": 1.161,
      "step": 57
    },
    {
      "epoch": 3.02027027027027,
      "grad_norm": 5.636988162994385,
      "learning_rate": 3.3082706766917295e-05,
      "loss": 0.7533,
      "step": 60
    },
    {
      "epoch": 3.0878378378378377,
      "grad_norm": 4.6487226486206055,
      "learning_rate": 2.9323308270676693e-05,
      "loss": 0.4725,
      "step": 70
    },
    {
      "epoch": 3.1283783783783785,
      "eval_accuracy": 0.7714285714285715,
      "eval_loss": 0.5527699589729309,
      "eval_runtime": 4.2206,
      "eval_samples_per_second": 16.585,
      "eval_steps_per_second": 1.185,
      "step": 76
    },
    {
      "epoch": 4.027027027027027,
      "grad_norm": 2.4070096015930176,
      "learning_rate": 2.556390977443609e-05,
      "loss": 0.4228,
      "step": 80
    },
    {
      "epoch": 4.094594594594595,
      "grad_norm": 7.774806499481201,
      "learning_rate": 2.1804511278195487e-05,
      "loss": 0.3629,
      "step": 90
    },
    {
      "epoch": 4.128378378378378,
      "eval_accuracy": 0.8571428571428571,
      "eval_loss": 0.3821955621242523,
      "eval_runtime": 4.4076,
      "eval_samples_per_second": 15.882,
      "eval_steps_per_second": 1.134,
      "step": 95
    },
    {
      "epoch": 5.033783783783784,
      "grad_norm": 6.296115398406982,
      "learning_rate": 1.8045112781954888e-05,
      "loss": 0.248,
      "step": 100
    },
    {
      "epoch": 5.101351351351352,
      "grad_norm": 7.289224147796631,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 0.196,
      "step": 110
    },
    {
      "epoch": 5.128378378378378,
      "eval_accuracy": 0.9142857142857143,
      "eval_loss": 0.30108556151390076,
      "eval_runtime": 3.9981,
      "eval_samples_per_second": 17.508,
      "eval_steps_per_second": 1.251,
      "step": 114
    },
    {
      "epoch": 6.04054054054054,
      "grad_norm": 3.051034450531006,
      "learning_rate": 1.0526315789473684e-05,
      "loss": 0.1786,
      "step": 120
    },
    {
      "epoch": 6.108108108108108,
      "grad_norm": 4.523892879486084,
      "learning_rate": 6.766917293233083e-06,
      "loss": 0.1357,
      "step": 130
    },
    {
      "epoch": 6.128378378378378,
      "eval_accuracy": 0.9142857142857143,
      "eval_loss": 0.2538822889328003,
      "eval_runtime": 4.2283,
      "eval_samples_per_second": 16.555,
      "eval_steps_per_second": 1.183,
      "step": 133
    },
    {
      "epoch": 7.047297297297297,
      "grad_norm": 0.9887262582778931,
      "learning_rate": 3.007518796992481e-06,
      "loss": 0.0847,
      "step": 140
    },
    {
      "epoch": 7.101351351351352,
      "eval_accuracy": 0.9428571428571428,
      "eval_loss": 0.19233502447605133,
      "eval_runtime": 4.6688,
      "eval_samples_per_second": 14.993,
      "eval_steps_per_second": 1.071,
      "step": 148
    },
    {
      "epoch": 7.101351351351352,
      "step": 148,
      "total_flos": 2.915999166844109e+18,
      "train_loss": 0.8118188864475971,
      "train_runtime": 290.0311,
      "train_samples_per_second": 8.165,
      "train_steps_per_second": 0.51
    },
    {
      "epoch": 7.101351351351352,
      "eval_accuracy": 0.8451612903225807,
      "eval_loss": 0.44350922107696533,
      "eval_runtime": 9.9685,
      "eval_samples_per_second": 15.549,
      "eval_steps_per_second": 1.003,
      "step": 148
    },
    {
      "epoch": 7.101351351351352,
      "eval_accuracy": 0.8451612903225807,
      "eval_loss": 0.44481009244918823,
      "eval_runtime": 9.6598,
      "eval_samples_per_second": 16.046,
      "eval_steps_per_second": 1.035,
      "step": 148
    }
  ],
  "logging_steps": 10,
  "max_steps": 148,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.915999166844109e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}