{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.403515338897705, "min": 1.403515338897705, "max": 1.430283546447754, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 71893.671875, "min": 69029.6484375, "max": 78431.828125, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 74.08108108108108, "min": 74.00450450450451, "max": 403.992, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49338.0, "min": 49277.0, "max": 50499.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999927.0, "min": 49874.0, "max": 1999927.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999927.0, "min": 49874.0, "max": 1999927.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.463331937789917, "min": 0.1848631352186203, "max": 2.514521837234497, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1640.5791015625, "min": 22.92302894592285, "max": 1640.5791015625, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.8728220287386, "min": 1.6577408614177858, "max": 4.013960438044417, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2579.299471139908, "min": 205.55986681580544, "max": 2579.299471139908, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.8728220287386, "min": 1.6577408614177858, "max": 4.013960438044417, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2579.299471139908, "min": 205.55986681580544, "max": 2579.299471139908, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.015625031416978647, "min": 0.01248684703168692, "max": 0.019660496093436248, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.046875094250935945, "min": 0.02497369406337384, "max": 0.05898148828030875, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.06256526733438174, "min": 0.02117715183024605, "max": 0.06400879739473264, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.18769580200314523, "min": 0.0423543036604921, "max": 0.18769580200314523, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.7544487485500062e-06, "min": 3.7544487485500062e-06, "max": 0.000295306276564575, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.126334624565002e-05, "min": 1.126334624565002e-05, "max": 0.00084406996864335, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10125144999999998, "min": 0.10125144999999998, "max": 0.198435425, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.30375434999999995, "min": 0.2076631, "max": 0.5813566500000003, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 7.244735500000012e-05, "min": 7.244735500000012e-05, "max": 0.004921927707499999, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00021734206500000037, "min": 0.00021734206500000037, "max": 0.014069696835, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1709211576", "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.2.1+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1709213897" }, "total": 2321.044223613, 
"count": 1, "self": 0.44082684900058666, "children": { "run_training.setup": { "total": 0.04806289300006483, "count": 1, "self": 0.04806289300006483 }, "TrainerController.start_learning": { "total": 2320.5553338709997, "count": 1, "self": 4.3313350091616485, "children": { "TrainerController._reset_env": { "total": 3.129539152999996, "count": 1, "self": 3.129539152999996 }, "TrainerController.advance": { "total": 2312.977408667838, "count": 233264, "self": 4.620321450837764, "children": { "env_step": { "total": 1834.2828072179564, "count": 233264, "self": 1520.0704339060262, "children": { "SubprocessEnvManager._take_step": { "total": 311.45681148596304, "count": 233264, "self": 16.340205632001016, "children": { "TorchPolicy.evaluate": { "total": 295.116605853962, "count": 223006, "self": 295.116605853962 } } }, "workers": { "total": 2.7555618259669927, "count": 233264, "self": 0.0, "children": { "worker_root": { "total": 2313.458858096061, "count": 233264, "is_parallel": true, "self": 1071.031844906045, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0009166629999981524, "count": 1, "is_parallel": true, "self": 0.00027823299990359374, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006384300000945586, "count": 2, "is_parallel": true, "self": 0.0006384300000945586 } } }, "UnityEnvironment.step": { "total": 0.03127154999992854, "count": 1, "is_parallel": true, "self": 0.00040208400002939015, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00023852899994381005, "count": 1, "is_parallel": true, "self": 0.00023852899994381005 }, "communicator.exchange": { "total": 0.029829328000005262, "count": 1, "is_parallel": true, "self": 0.029829328000005262 }, "steps_from_proto": { "total": 0.0008016089999500764, "count": 1, "is_parallel": true, "self": 0.0002054189999398659, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005961900000102105, "count": 2, "is_parallel": true, "self": 0.0005961900000102105 } } } } } } }, "UnityEnvironment.step": { "total": 1242.4270131900157, "count": 233263, "is_parallel": true, "self": 40.75529222409864, "children": { "UnityEnvironment._generate_step_input": { "total": 80.87309059805614, "count": 233263, "is_parallel": true, "self": 80.87309059805614 }, "communicator.exchange": { "total": 1030.561879831873, "count": 233263, "is_parallel": true, "self": 1030.561879831873 }, "steps_from_proto": { "total": 90.23675053598777, "count": 233263, "is_parallel": true, "self": 31.700462954126465, "children": { "_process_rank_one_or_two_observation": { "total": 58.5362875818613, "count": 466526, "is_parallel": true, "self": 58.5362875818613 } } } } } } } } } } }, "trainer_advance": { "total": 474.07427999904417, "count": 233264, "self": 6.5551966959667425, "children": { "process_trajectory": { "total": 149.8836348960774, "count": 233264, "self": 148.58612029507765, "children": { "RLTrainer._checkpoint": { "total": 1.2975146009997616, "count": 10, "self": 1.2975146009997616 } } }, "_update_policy": { "total": 317.635448407, "count": 97, "self": 254.2482905919984, "children": { "TorchPPOOptimizer.update": { "total": 63.387157815001615, "count": 2910, "self": 63.387157815001615 } } } } } } }, "trainer_threads": { "total": 1.3779999790131114e-06, "count": 1, "self": 1.3779999790131114e-06 }, "TrainerController._save_models": { "total": 0.1170496629997615, "count": 1, "self": 0.0020809629995710566, "children": { 
"RLTrainer._checkpoint": { "total": 0.11496870000019044, "count": 1, "self": 0.11496870000019044 } } } } } } }