{ "name": "root", "gauges": { "SoccerTwos.Policy.Entropy.mean": { "value": 3.185758113861084, "min": 2.409449577331543, "max": 3.295715808868408, "count": 772 }, "SoccerTwos.Policy.Entropy.sum": { "value": 61880.1640625, "min": 19984.75, "max": 145273.109375, "count": 772 }, "SoccerTwos.Environment.EpisodeLength.mean": { "value": 507.8, "min": 289.1875, "max": 999.0, "count": 772 }, "SoccerTwos.Environment.EpisodeLength.sum": { "value": 20312.0, "min": 16356.0, "max": 25484.0, "count": 772 }, "SoccerTwos.Self-play.ELO.mean": { "value": 1196.9318981332397, "min": 1181.837566493522, "max": 1201.3472046468949, "count": 275 }, "SoccerTwos.Self-play.ELO.sum": { "value": 14363.182777598875, "min": 2363.675132987044, "max": 26400.689003477662, "count": 275 }, "SoccerTwos.Step.mean": { "value": 7719494.0, "min": 9758.0, "max": 7719494.0, "count": 772 }, "SoccerTwos.Step.sum": { "value": 7719494.0, "min": 9758.0, "max": 7719494.0, "count": 772 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": -0.0052559832111001015, "min": -0.20999744534492493, "max": 0.10688348114490509, "count": 772 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": -0.10511966794729233, "min": -2.376962661743164, "max": 1.389485239982605, "count": 772 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": -0.006780259311199188, "min": -0.23403751850128174, "max": 0.106874018907547, "count": 772 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": -0.13560518622398376, "min": -2.3799240589141846, "max": 1.3893622159957886, "count": 772 }, "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 772 }, "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 772 }, "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": -0.2688399970531464, "min": -0.6666666666666666, "max": 0.301000005669064, "count": 772 }, "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": -5.376799941062927, "min": -10.0, "max": 5.861999928951263, "count": 772 }, "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": -0.2688399970531464, "min": -0.6666666666666666, "max": 0.301000005669064, "count": 772 }, "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": -5.376799941062927, "min": -10.0, "max": 5.861999928951263, "count": 772 }, "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 772 }, "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 772 }, "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.017427041847258806, "min": 0.010121221931596362, "max": 0.0252917977127557, "count": 355 }, "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.017427041847258806, "min": 0.010121221931596362, "max": 0.0252917977127557, "count": 355 }, "SoccerTwos.Losses.ValueLoss.mean": { "value": 0.00393834006972611, "min": 1.2345998963899282e-09, "max": 0.010751086202314278, "count": 355 }, "SoccerTwos.Losses.ValueLoss.sum": { "value": 0.00393834006972611, "min": 1.2345998963899282e-09, "max": 0.010751086202314278, "count": 355 }, "SoccerTwos.Losses.BaselineLoss.mean": { "value": 0.003957364715946218, "min": 9.012675435589775e-09, "max": 0.009901517381270727, "count": 355 }, "SoccerTwos.Losses.BaselineLoss.sum": { "value": 0.003957364715946218, "min": 9.012675435589775e-09, "max": 0.009901517381270727, "count": 355 }, "SoccerTwos.Policy.LearningRate.mean": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 355 }, "SoccerTwos.Policy.LearningRate.sum": { "value": 0.0003, "min": 0.0003, 
"max": 0.0003, "count": 355 }, "SoccerTwos.Policy.Epsilon.mean": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 355 }, "SoccerTwos.Policy.Epsilon.sum": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 355 }, "SoccerTwos.Policy.Beta.mean": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 355 }, "SoccerTwos.Policy.Beta.sum": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 355 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1681331161", "python_version": "3.9.13 (main, Nov 7 2022, 17:01:06) \n[Clang 14.0.0 (clang-1400.0.29.202)]", "command_line_arguments": "/Users/jackmurphy/Documents/courses/hf-deep-rl/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics --force", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0", "numpy_version": "1.21.2", "end_time_seconds": "1681363933" }, "total": 32772.095957375, "count": 1, "self": 0.5066618330019992, "children": { "run_training.setup": { "total": 0.07895366699999862, "count": 1, "self": 0.07895366699999862 }, "TrainerController.start_learning": { "total": 32771.510341875, "count": 1, "self": 7.002623866432259, "children": { "TrainerController._reset_env": { "total": 9.317278415991156, "count": 39, "self": 9.317278415991156 }, "TrainerController.advance": { "total": 32754.862239133578, "count": 503950, "self": 6.2791733188787475, "children": { "env_step": { "total": 27147.697168513772, "count": 503950, "self": 26168.935575880743, "children": { "SubprocessEnvManager._take_step": { "total": 974.453022444436, "count": 503950, "self": 32.0912888506075, "children": { "TorchPolicy.evaluate": { "total": 942.3617335938285, "count": 1000848, "self": 942.3617335938285 } } }, "workers": { "total": 4.308570188593826, "count": 503949, "self": 0.0, "children": { "worker_root": { "total": 32750.94734136199, "count": 503949, "is_parallel": true, "self": 7445.106800660382, "children": { "steps_from_proto": { "total": 0.06810316799001459, "count": 78, "is_parallel": true, "self": 0.008336256973582756, "children": { "_process_rank_one_or_two_observation": { "total": 0.059766911016431834, "count": 312, "is_parallel": true, "self": 0.059766911016431834 } } }, "UnityEnvironment.step": { "total": 25305.77243753362, "count": 503949, "is_parallel": true, "self": 68.98783545863625, "children": { "UnityEnvironment._generate_step_input": { "total": 433.1887908870273, "count": 503949, "is_parallel": true, "self": 433.1887908870273 }, "communicator.exchange": { "total": 23941.82480981736, "count": 503949, "is_parallel": true, "self": 23941.82480981736 }, "steps_from_proto": { "total": 861.7710013705948, "count": 1007898, "is_parallel": true, "self": 100.56571404315241, "children": { "_process_rank_one_or_two_observation": { "total": 761.2052873274424, "count": 4031592, "is_parallel": true, "self": 761.2052873274424 } } } } } } } } } } }, "trainer_advance": { "total": 5600.8858973009255, "count": 503949, "self": 63.951690969590345, "children": { "process_trajectory": { "total": 817.6289681963408, "count": 503949, "self": 815.3199227363489, "children": { "RLTrainer._checkpoint": { "total": 2.3090454599919212, "count": 15, "self": 2.3090454599919212 } } }, "_update_policy": { 
"total": 4719.305238134994, "count": 356, "self": 758.1159668570444, "children": { "TorchPOCAOptimizer.update": { "total": 3961.18927127795, "count": 10680, "self": 3961.18927127795 } } } } } } }, "trainer_threads": { "total": 4.169996827840805e-07, "count": 1, "self": 4.169996827840805e-07 }, "TrainerController._save_models": { "total": 0.32820004200038966, "count": 1, "self": 0.15608808400429552, "children": { "RLTrainer._checkpoint": { "total": 0.17211195799609413, "count": 1, "self": 0.17211195799609413 } } } } } } }