{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4105287790298462, "min": 1.4105287790298462, "max": 1.427003264427185, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 70193.5546875, "min": 68365.8671875, "max": 78168.890625, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 78.34920634920636, "min": 74.73484848484848, "max": 369.45185185185187, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49360.0, "min": 48669.0, "max": 50036.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999999.0, "min": 49886.0, "max": 1999999.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999999.0, "min": 49886.0, "max": 1999999.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.5186657905578613, "min": 0.09652551263570786, "max": 2.5186657905578613, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1586.759521484375, "min": 13.127470016479492, "max": 1622.793212890625, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.9712726523005775, "min": 1.7301150268929846, "max": 4.024853588859307, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2501.9017709493637, "min": 235.2956436574459, "max": 2505.668896853924, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.9712726523005775, "min": 1.7301150268929846, "max": 4.024853588859307, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2501.9017709493637, "min": 235.2956436574459, "max": 2505.668896853924, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.01743607443046737, "min": 0.0132064024117426, "max": 0.022148759058109136, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.052308223291402105, "min": 0.0264128048234852, "max": 0.05521203211974353, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.055797830141252946, "min": 0.02118114416177074, "max": 0.06686211060732603, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.16739349042375884, "min": 0.04236228832354148, "max": 0.17688428945839405, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.707448764216666e-06, "min": 3.707448764216666e-06, "max": 0.00029531557656147493, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.1122346292649998e-05, "min": 1.1122346292649998e-05, "max": 0.0008441172186276001, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10123578333333333, "min": 0.10123578333333333, "max": 0.19843852499999995, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.30370735, "min": 0.20760275, "max": 0.5813724000000001, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 7.166558833333329e-05, "min": 7.166558833333329e-05, "max": 0.004922082397499999, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00021499676499999988, "min": 0.00021499676499999988, "max": 0.01407048276, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1673454751", "python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.29.0.dev0", "mlagents_envs_version": "0.29.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.8.1+cu102", "numpy_version": "1.21.6", "end_time_seconds": "1673456926" 
}, "total": 2174.759543926, "count": 1, "self": 0.3876021139999466, "children": { "run_training.setup": { "total": 0.11229681899999377, "count": 1, "self": 0.11229681899999377 }, "TrainerController.start_learning": { "total": 2174.259644993, "count": 1, "self": 3.637495599999511, "children": { "TrainerController._reset_env": { "total": 7.673638118999861, "count": 1, "self": 7.673638118999861 }, "TrainerController.advance": { "total": 2162.8348601880007, "count": 232829, "self": 3.776690117988437, "children": { "env_step": { "total": 1695.5637006469537, "count": 232829, "self": 1428.5703123600483, "children": { "SubprocessEnvManager._take_step": { "total": 264.38925439194395, "count": 232829, "self": 13.933211703001916, "children": { "TorchPolicy.evaluate": { "total": 250.45604268894203, "count": 222919, "self": 62.71896190783241, "children": { "TorchPolicy.sample_actions": { "total": 187.73708078110963, "count": 222919, "self": 187.73708078110963 } } } } }, "workers": { "total": 2.6041338949614783, "count": 232829, "self": 0.0, "children": { "worker_root": { "total": 2166.7732357879745, "count": 232829, "is_parallel": true, "self": 989.667506905043, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0020419079999101086, "count": 1, "is_parallel": true, "self": 0.0003392479998183262, "children": { "_process_rank_one_or_two_observation": { "total": 0.0017026600000917824, "count": 2, "is_parallel": true, "self": 0.0017026600000917824 } } }, "UnityEnvironment.step": { "total": 0.026216574999807563, "count": 1, "is_parallel": true, "self": 0.00029306400006134936, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00019486399992274528, "count": 1, "is_parallel": true, "self": 0.00019486399992274528 }, "communicator.exchange": { "total": 0.024843767999982447, "count": 1, "is_parallel": true, "self": 0.024843767999982447 }, "steps_from_proto": { "total": 0.0008848789998410211, "count": 1, "is_parallel": true, "self": 0.00041899899974850996, "children": { "_process_rank_one_or_two_observation": { "total": 0.0004658800000925112, "count": 2, "is_parallel": true, "self": 0.0004658800000925112 } } } } } } }, "UnityEnvironment.step": { "total": 1177.1057288829315, "count": 232828, "is_parallel": true, "self": 33.95334850185236, "children": { "UnityEnvironment._generate_step_input": { "total": 74.67691639793225, "count": 232828, "is_parallel": true, "self": 74.67691639793225 }, "communicator.exchange": { "total": 977.28371492307, "count": 232828, "is_parallel": true, "self": 977.28371492307 }, "steps_from_proto": { "total": 91.19174906007697, "count": 232828, "is_parallel": true, "self": 37.16437409914738, "children": { "_process_rank_one_or_two_observation": { "total": 54.02737496092959, "count": 465656, "is_parallel": true, "self": 54.02737496092959 } } } } } } } } } } }, "trainer_advance": { "total": 463.4944694230585, "count": 232829, "self": 5.832206336075615, "children": { "process_trajectory": { "total": 146.60310968998283, "count": 232829, "self": 145.43390570398356, "children": { "RLTrainer._checkpoint": { "total": 1.1692039859992747, "count": 10, "self": 1.1692039859992747 } } }, "_update_policy": { "total": 311.05915339700005, "count": 97, "self": 258.3015022480024, "children": { "TorchPPOOptimizer.update": { "total": 52.75765114899764, "count": 2910, "self": 52.75765114899764 } } } } } } }, "trainer_threads": { "total": 9.380000847158954e-07, "count": 1, "self": 9.380000847158954e-07 
}, "TrainerController._save_models": { "total": 0.11365014800003337, "count": 1, "self": 0.002062428000044747, "children": { "RLTrainer._checkpoint": { "total": 0.11158771999998862, "count": 1, "self": 0.11158771999998862 } } } } } } }