{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4070675373077393, "min": 1.4070675373077393, "max": 1.4280903339385986, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 69955.1796875, "min": 68212.453125, "max": 78966.171875, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 93.2810707456979, "min": 87.7921146953405, "max": 417.78333333333336, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 48786.0, "min": 48786.0, "max": 50144.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999951.0, "min": 49933.0, "max": 1999951.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999951.0, "min": 49933.0, "max": 1999951.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.433748722076416, "min": 0.10892074555158615, "max": 2.4701175689697266, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1272.8505859375, "min": 12.961568832397461, "max": 1365.446533203125, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.8273371044354048, "min": 1.9597367505315972, "max": 3.905594036923892, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2001.6973056197166, "min": 233.20867331326008, "max": 2124.1000408530235, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.8273371044354048, "min": 1.9597367505315972, "max": 3.905594036923892, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2001.6973056197166, "min": 233.20867331326008, "max": 2124.1000408530235, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.014144179469440132, "min": 0.013793812167326298, "max": 0.019536686017818286, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.028288358938880265, "min": 0.027587624334652595, "max": 0.05662826739329224, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.055024615054329234, "min": 0.021949862067898115, "max": 0.06252035455157359, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.11004923010865847, "min": 0.04389972413579623, "max": 0.17996499240398406, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 4.584548471850001e-06, "min": 4.584548471850001e-06, "max": 0.000295369876543375, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 9.169096943700002e-06, "min": 9.169096943700002e-06, "max": 0.0008443689185436999, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10152814999999997, "min": 0.10152814999999997, "max": 0.198456625, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.20305629999999994, "min": 0.20305629999999994, "max": 0.5814563, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 8.625468499999999e-05, "min": 8.625468499999999e-05, "max": 0.0049229855875, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00017250936999999998, "min": 0.00017250936999999998, "max": 0.01407466937, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1738755724", "python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics", "mlagents_version": "1.2.0.dev0", "mlagents_envs_version": "1.2.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.6.0+cu124", "numpy_version": "1.23.5", "end_time_seconds": "1738758144" }, 
"total": 2419.59310026, "count": 1, "self": 0.7476231169998755, "children": { "run_training.setup": { "total": 0.022587933999943743, "count": 1, "self": 0.022587933999943743 }, "TrainerController.start_learning": { "total": 2418.822889209, "count": 1, "self": 4.407563494982696, "children": { "TrainerController._reset_env": { "total": 3.1402409099999886, "count": 1, "self": 3.1402409099999886 }, "TrainerController.advance": { "total": 2411.100260125017, "count": 231914, "self": 4.59575527787274, "children": { "env_step": { "total": 1914.8046750911553, "count": 231914, "self": 1492.0637309712706, "children": { "SubprocessEnvManager._take_step": { "total": 420.1301805450023, "count": 231914, "self": 15.492411518013, "children": { "TorchPolicy.evaluate": { "total": 404.6377690269893, "count": 222890, "self": 404.6377690269893 } } }, "workers": { "total": 2.6107635748824123, "count": 231914, "self": 0.0, "children": { "worker_root": { "total": 2411.383374013023, "count": 231914, "is_parallel": true, "self": 1200.855698177027, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0009608450000087032, "count": 1, "is_parallel": true, "self": 0.0003475190000017392, "children": { "_process_rank_one_or_two_observation": { "total": 0.000613326000006964, "count": 2, "is_parallel": true, "self": 0.000613326000006964 } } }, "UnityEnvironment.step": { "total": 0.029415951000032692, "count": 1, "is_parallel": true, "self": 0.0003011499999274747, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0001876780000884537, "count": 1, "is_parallel": true, "self": 0.0001876780000884537 }, "communicator.exchange": { "total": 0.0282175830000142, "count": 1, "is_parallel": true, "self": 0.0282175830000142 }, "steps_from_proto": { "total": 0.000709540000002562, "count": 1, "is_parallel": true, "self": 0.00020398999993176403, "children": { "_process_rank_one_or_two_observation": { "total": 0.000505550000070798, "count": 2, "is_parallel": true, "self": 0.000505550000070798 } } } } } } }, "UnityEnvironment.step": { "total": 1210.527675835996, "count": 231913, "is_parallel": true, "self": 37.45615338202515, "children": { "UnityEnvironment._generate_step_input": { "total": 80.1401831889915, "count": 231913, "is_parallel": true, "self": 80.1401831889915 }, "communicator.exchange": { "total": 1007.4910889450516, "count": 231913, "is_parallel": true, "self": 1007.4910889450516 }, "steps_from_proto": { "total": 85.44025031992771, "count": 231913, "is_parallel": true, "self": 30.07291444272653, "children": { "_process_rank_one_or_two_observation": { "total": 55.367335877201185, "count": 463826, "is_parallel": true, "self": 55.367335877201185 } } } } } } } } } } }, "trainer_advance": { "total": 491.69982975598896, "count": 231914, "self": 6.503726747945507, "children": { "process_trajectory": { "total": 157.30269372004352, "count": 231914, "self": 155.85395701404366, "children": { "RLTrainer._checkpoint": { "total": 1.4487367059998633, "count": 10, "self": 1.4487367059998633 } } }, "_update_policy": { "total": 327.89340928799993, "count": 96, "self": 261.74668694199727, "children": { "TorchPPOOptimizer.update": { "total": 66.14672234600266, "count": 2880, "self": 66.14672234600266 } } } } } } }, "trainer_threads": { "total": 1.1639999684120994e-06, "count": 1, "self": 1.1639999684120994e-06 }, "TrainerController._save_models": { "total": 0.17482351500029836, "count": 1, "self": 0.0027196330001970637, "children": { 
"RLTrainer._checkpoint": { "total": 0.1721038820001013, "count": 1, "self": 0.1721038820001013 } } } } } } }