ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4064135551452637,
"min": 1.4064135551452637,
"max": 1.4307072162628174,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70722.9140625,
"min": 67234.125,
"max": 76619.6640625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 92.51762523191094,
"min": 76.82426127527216,
"max": 374.9624060150376,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49867.0,
"min": 48747.0,
"max": 50174.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999690.0,
"min": 49651.0,
"max": 1999690.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999690.0,
"min": 49651.0,
"max": 1999690.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.423640489578247,
"min": 0.11599068343639374,
"max": 2.5011682510375977,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1306.34228515625,
"min": 15.310770034790039,
"max": 1571.7327880859375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.677600664973038,
"min": 1.7459711441487977,
"max": 4.0250406051371375,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1982.2267584204674,
"min": 230.4681910276413,
"max": 2500.044366300106,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.677600664973038,
"min": 1.7459711441487977,
"max": 4.0250406051371375,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1982.2267584204674,
"min": 230.4681910276413,
"max": 2500.044366300106,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01588540130275457,
"min": 0.013356235990553009,
"max": 0.020258982046410287,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04765620390826371,
"min": 0.026712471981106017,
"max": 0.057171170967922076,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.053918747603893286,
"min": 0.021967281494289635,
"max": 0.06078557725995779,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16175624281167986,
"min": 0.04393456298857927,
"max": 0.18167083971202375,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4755988414999863e-06,
"min": 3.4755988414999863e-06,
"max": 0.00029538345153885,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0426796524499959e-05,
"min": 1.0426796524499959e-05,
"max": 0.00084444646851785,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10115849999999998,
"min": 0.10115849999999998,
"max": 0.19846115000000009,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30347549999999995,
"min": 0.20744215,
"max": 0.58148215,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.78091499999998e-05,
"min": 6.78091499999998e-05,
"max": 0.004923211384999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002034274499999994,
"min": 0.0002034274499999994,
"max": 0.014075959285,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739500823",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739503228"
},
"total": 2405.545457483,
"count": 1,
"self": 0.44287363399962487,
"children": {
"run_training.setup": {
"total": 0.021872565000080613,
"count": 1,
"self": 0.021872565000080613
},
"TrainerController.start_learning": {
"total": 2405.0807112840002,
"count": 1,
"self": 4.120088174992361,
"children": {
"TrainerController._reset_env": {
"total": 3.094055287999936,
"count": 1,
"self": 3.094055287999936
},
"TrainerController.advance": {
"total": 2397.7520421240083,
"count": 232904,
"self": 4.317443073156028,
"children": {
"env_step": {
"total": 1904.0157375110023,
"count": 232904,
"self": 1486.0602326729086,
"children": {
"SubprocessEnvManager._take_step": {
"total": 415.3952885640522,
"count": 232904,
"self": 15.161237796903265,
"children": {
"TorchPolicy.evaluate": {
"total": 400.2340507671489,
"count": 223006,
"self": 400.2340507671489
}
}
},
"workers": {
"total": 2.5602162740414087,
"count": 232904,
"self": 0.0,
"children": {
"worker_root": {
"total": 2397.726172286023,
"count": 232904,
"is_parallel": true,
"self": 1188.396827396079,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010176319999573025,
"count": 1,
"is_parallel": true,
"self": 0.00026371999990715267,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007539120000501498,
"count": 2,
"is_parallel": true,
"self": 0.0007539120000501498
}
}
},
"UnityEnvironment.step": {
"total": 0.028628386999912436,
"count": 1,
"is_parallel": true,
"self": 0.0003147039998339096,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001819870000190349,
"count": 1,
"is_parallel": true,
"self": 0.0001819870000190349
},
"communicator.exchange": {
"total": 0.027464387000009083,
"count": 1,
"is_parallel": true,
"self": 0.027464387000009083
},
"steps_from_proto": {
"total": 0.0006673090000504089,
"count": 1,
"is_parallel": true,
"self": 0.00018665300001430296,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000480656000036106,
"count": 2,
"is_parallel": true,
"self": 0.000480656000036106
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1209.329344889944,
"count": 232903,
"is_parallel": true,
"self": 36.347815965884365,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 78.15992033897544,
"count": 232903,
"is_parallel": true,
"self": 78.15992033897544
},
"communicator.exchange": {
"total": 1010.7105638990254,
"count": 232903,
"is_parallel": true,
"self": 1010.7105638990254
},
"steps_from_proto": {
"total": 84.11104468605879,
"count": 232903,
"is_parallel": true,
"self": 29.626602853926897,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.48444183213189,
"count": 465806,
"is_parallel": true,
"self": 54.48444183213189
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 489.4188615398501,
"count": 232904,
"self": 5.974902236752769,
"children": {
"process_trajectory": {
"total": 161.9247523700991,
"count": 232904,
"self": 160.68804413509895,
"children": {
"RLTrainer._checkpoint": {
"total": 1.236708235000151,
"count": 10,
"self": 1.236708235000151
}
}
},
"_update_policy": {
"total": 321.51920693299826,
"count": 97,
"self": 256.1318334559992,
"children": {
"TorchPPOOptimizer.update": {
"total": 65.38737347699907,
"count": 2910,
"self": 65.38737347699907
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4600000213249587e-06,
"count": 1,
"self": 1.4600000213249587e-06
},
"TrainerController._save_models": {
"total": 0.11452423699984138,
"count": 1,
"self": 0.0019183219997103151,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11260591500013106,
"count": 1,
"self": 0.11260591500013106
}
}
}
}
}
}
}
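
For reference, below is a minimal Python sketch (not part of the log itself) showing one way to inspect this file offline: it prints a headline training gauge and then walks the hierarchical wall-clock timer tree. The local path "run_logs/timers.json", the walk_timers helper, and the 1-second printing threshold are illustrative assumptions for this sketch, not anything defined by ML-Agents.

# Minimal sketch: read the timers.json shown above and summarize it.
# Assumes the JSON has been saved locally as "run_logs/timers.json" (hypothetical path).
import json

def walk_timers(node, name="root", depth=0, min_seconds=1.0):
    """Recursively print the timer tree, skipping nodes below min_seconds of wall-clock time."""
    total = node.get("total", 0.0)
    if total >= min_seconds:
        print(f"{'  ' * depth}{name}: {total:.1f}s over {node.get('count', 0)} calls")
    for child_name, child in node.get("children", {}).items():
        walk_timers(child, child_name, depth + 1, min_seconds)

with open("run_logs/timers.json") as f:
    timers = json.load(f)

# "gauges" holds per-run training statistics (value/min/max over the 40 recorded summaries).
reward = timers["gauges"]["Huggy.Environment.CumulativeReward.mean"]
print(f"Final mean reward: {reward['value']:.2f} "
      f"(min {reward['min']:.2f}, max {reward['max']:.2f})")

# The remainder of the file is a hierarchical profile of where the run spent its time.
walk_timers(timers)

Running this against the data above would show the roughly 2405 s total split mainly between env_step (environment stepping and communicator.exchange) and trainer_advance (trajectory processing and PPO updates).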