{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9900199600798403,
"eval_steps": 500,
"global_step": 62,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 34.680301666259766,
"learning_rate": 1e-05,
"loss": 16.3759,
"step": 1
},
{
"epoch": 0.03,
"grad_norm": 3.369790554046631,
"learning_rate": 1e-05,
"loss": 0.6651,
"step": 2
},
{
"epoch": 0.05,
"grad_norm": 3.2419369220733643,
"learning_rate": 1e-05,
"loss": 0.5262,
"step": 3
},
{
"epoch": 0.06,
"grad_norm": 2.575935125350952,
"learning_rate": 1e-05,
"loss": 0.446,
"step": 4
},
{
"epoch": 0.08,
"grad_norm": 2.4852099418640137,
"learning_rate": 1e-05,
"loss": 0.3491,
"step": 5
},
{
"epoch": 0.1,
"grad_norm": 0.8563106060028076,
"learning_rate": 1e-05,
"loss": 0.3045,
"step": 6
},
{
"epoch": 0.11,
"grad_norm": 4.232894420623779,
"learning_rate": 1e-05,
"loss": 0.2871,
"step": 7
},
{
"epoch": 0.13,
"grad_norm": 0.6341336965560913,
"learning_rate": 1e-05,
"loss": 0.2612,
"step": 8
},
{
"epoch": 0.14,
"grad_norm": 0.8098199963569641,
"learning_rate": 1e-05,
"loss": 591.0046,
"step": 9
},
{
"epoch": 0.16,
"grad_norm": 0.407880961894989,
"learning_rate": 1e-05,
"loss": 0.2701,
"step": 10
},
{
"epoch": 0.18,
"grad_norm": 0.34948238730430603,
"learning_rate": 1e-05,
"loss": 0.277,
"step": 11
},
{
"epoch": 0.19,
"grad_norm": 0.3844144642353058,
"learning_rate": 1e-05,
"loss": 0.2608,
"step": 12
},
{
"epoch": 0.21,
"grad_norm": 0.39868655800819397,
"learning_rate": 1e-05,
"loss": 0.257,
"step": 13
},
{
"epoch": 0.22,
"grad_norm": 0.3178333640098572,
"learning_rate": 1e-05,
"loss": 0.242,
"step": 14
},
{
"epoch": 0.24,
"grad_norm": 0.28359681367874146,
"learning_rate": 1e-05,
"loss": 0.2391,
"step": 15
},
{
"epoch": 0.26,
"grad_norm": 0.3104497790336609,
"learning_rate": 1e-05,
"loss": 0.2297,
"step": 16
},
{
"epoch": 0.27,
"grad_norm": 0.7799801826477051,
"learning_rate": 1e-05,
"loss": 81.355,
"step": 17
},
{
"epoch": 0.29,
"grad_norm": 0.2582087218761444,
"learning_rate": 1e-05,
"loss": 0.2603,
"step": 18
},
{
"epoch": 0.3,
"grad_norm": 0.2229820191860199,
"learning_rate": 1e-05,
"loss": 0.255,
"step": 19
},
{
"epoch": 0.32,
"grad_norm": 0.2470751404762268,
"learning_rate": 1e-05,
"loss": 0.2487,
"step": 20
},
{
"epoch": 0.34,
"grad_norm": 0.28791287541389465,
"learning_rate": 1e-05,
"loss": 0.2408,
"step": 21
},
{
"epoch": 0.35,
"grad_norm": 0.2907260060310364,
"learning_rate": 1e-05,
"loss": 0.2396,
"step": 22
},
{
"epoch": 0.37,
"grad_norm": 0.2796117663383484,
"learning_rate": 1e-05,
"loss": 0.2281,
"step": 23
},
{
"epoch": 0.38,
"grad_norm": 0.3333979547023773,
"learning_rate": 1e-05,
"loss": 0.2261,
"step": 24
},
{
"epoch": 0.4,
"grad_norm": 0.4846266508102417,
"learning_rate": 1e-05,
"loss": 63.2997,
"step": 25
},
{
"epoch": 0.42,
"grad_norm": 0.20356160402297974,
"learning_rate": 1e-05,
"loss": 0.2481,
"step": 26
},
{
"epoch": 0.43,
"grad_norm": 0.2213103324174881,
"learning_rate": 1e-05,
"loss": 0.2387,
"step": 27
},
{
"epoch": 0.45,
"grad_norm": 0.26175907254219055,
"learning_rate": 1e-05,
"loss": 0.2475,
"step": 28
},
{
"epoch": 0.46,
"grad_norm": 0.2190675288438797,
"learning_rate": 1e-05,
"loss": 0.2379,
"step": 29
},
{
"epoch": 0.48,
"grad_norm": 0.24093040823936462,
"learning_rate": 1e-05,
"loss": 0.239,
"step": 30
},
{
"epoch": 0.5,
"grad_norm": 0.277065247297287,
"learning_rate": 1e-05,
"loss": 0.2362,
"step": 31
},
{
"epoch": 0.51,
"grad_norm": 0.33813706040382385,
"learning_rate": 1e-05,
"loss": 0.2174,
"step": 32
},
{
"epoch": 0.53,
"grad_norm": 0.40611541271209717,
"learning_rate": 1e-05,
"loss": 203.4452,
"step": 33
},
{
"epoch": 0.54,
"grad_norm": 0.2279174029827118,
"learning_rate": 1e-05,
"loss": 0.2388,
"step": 34
},
{
"epoch": 0.56,
"grad_norm": 0.21085476875305176,
"learning_rate": 1e-05,
"loss": 0.2417,
"step": 35
},
{
"epoch": 0.57,
"grad_norm": 0.2684917151927948,
"learning_rate": 1e-05,
"loss": 0.2426,
"step": 36
},
{
"epoch": 0.59,
"grad_norm": 0.24274484813213348,
"learning_rate": 1e-05,
"loss": 0.2409,
"step": 37
},
{
"epoch": 0.61,
"grad_norm": 0.26018860936164856,
"learning_rate": 1e-05,
"loss": 0.2212,
"step": 38
},
{
"epoch": 0.62,
"grad_norm": 0.2579266130924225,
"learning_rate": 1e-05,
"loss": 0.2281,
"step": 39
},
{
"epoch": 0.64,
"grad_norm": 0.31496453285217285,
"learning_rate": 1e-05,
"loss": 0.2149,
"step": 40
},
{
"epoch": 0.65,
"grad_norm": 0.4951270520687103,
"learning_rate": 1e-05,
"loss": 126.4606,
"step": 41
},
{
"epoch": 0.67,
"grad_norm": 0.2128014862537384,
"learning_rate": 1e-05,
"loss": 0.2417,
"step": 42
},
{
"epoch": 0.69,
"grad_norm": 0.23777072131633759,
"learning_rate": 1e-05,
"loss": 0.2327,
"step": 43
},
{
"epoch": 0.7,
"grad_norm": 0.20361731946468353,
"learning_rate": 1e-05,
"loss": 0.2301,
"step": 44
},
{
"epoch": 0.72,
"grad_norm": 0.2542952001094818,
"learning_rate": 1e-05,
"loss": 0.2406,
"step": 45
},
{
"epoch": 0.73,
"grad_norm": 0.283297598361969,
"learning_rate": 1e-05,
"loss": 0.2328,
"step": 46
},
{
"epoch": 0.75,
"grad_norm": 0.261777400970459,
"learning_rate": 1e-05,
"loss": 0.2246,
"step": 47
},
{
"epoch": 0.77,
"grad_norm": 0.27042126655578613,
"learning_rate": 1e-05,
"loss": 0.2136,
"step": 48
},
{
"epoch": 0.78,
"grad_norm": 0.45323169231414795,
"learning_rate": 1e-05,
"loss": 37.1518,
"step": 49
},
{
"epoch": 0.8,
"grad_norm": 0.19414253532886505,
"learning_rate": 1e-05,
"loss": 0.2423,
"step": 50
},
{
"epoch": 0.81,
"grad_norm": 0.2136753797531128,
"learning_rate": 1e-05,
"loss": 0.237,
"step": 51
},
{
"epoch": 0.83,
"grad_norm": 0.23989234864711761,
"learning_rate": 1e-05,
"loss": 0.2348,
"step": 52
},
{
"epoch": 0.85,
"grad_norm": 0.23847776651382446,
"learning_rate": 1e-05,
"loss": 0.2357,
"step": 53
},
{
"epoch": 0.86,
"grad_norm": 0.2265355885028839,
"learning_rate": 1e-05,
"loss": 0.2276,
"step": 54
},
{
"epoch": 0.88,
"grad_norm": 0.2720184326171875,
"learning_rate": 1e-05,
"loss": 0.2215,
"step": 55
},
{
"epoch": 0.89,
"grad_norm": 0.32160285115242004,
"learning_rate": 1e-05,
"loss": 0.2123,
"step": 56
},
{
"epoch": 0.91,
"grad_norm": 0.4474925100803375,
"learning_rate": 1e-05,
"loss": 82.7252,
"step": 57
},
{
"epoch": 0.93,
"grad_norm": 0.22881349921226501,
"learning_rate": 1e-05,
"loss": 0.2473,
"step": 58
},
{
"epoch": 0.94,
"grad_norm": 0.2213418185710907,
"learning_rate": 1e-05,
"loss": 0.2329,
"step": 59
},
{
"epoch": 0.96,
"grad_norm": 0.23734869062900543,
"learning_rate": 1e-05,
"loss": 0.2313,
"step": 60
},
{
"epoch": 0.97,
"grad_norm": 0.26081162691116333,
"learning_rate": 1e-05,
"loss": 0.2365,
"step": 61
},
{
"epoch": 0.99,
"grad_norm": 0.29037076234817505,
"learning_rate": 1e-05,
"loss": 0.2291,
"step": 62
},
{
"epoch": 0.99,
"step": 62,
"total_flos": 190914982051840.0,
"train_loss": 19.610142173545977,
"train_runtime": 47287.2785,
"train_samples_per_second": 0.021,
"train_steps_per_second": 0.001
}
],
"logging_steps": 1.0,
"max_steps": 62,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 20,
"total_flos": 190914982051840.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}