{ "name": "root", "gauges": { "SoccerTwos.Policy.Entropy.mean": { "value": 1.886918306350708, "min": 1.8025468587875366, "max": 2.7012710571289062, "count": 332 }, "SoccerTwos.Policy.Entropy.sum": { "value": 39368.6640625, "min": 33501.44140625, "max": 59490.9609375, "count": 332 }, "SoccerTwos.Environment.EpisodeLength.mean": { "value": 58.18181818181818, "min": 42.74561403508772, "max": 81.10169491525424, "count": 332 }, "SoccerTwos.Environment.EpisodeLength.sum": { "value": 20480.0, "min": 18440.0, "max": 20752.0, "count": 332 }, "SoccerTwos.Self-play.ELO.mean": { "value": 1595.6950137239876, "min": 1338.5723918899857, "max": 1596.5021363768267, "count": 332 }, "SoccerTwos.Self-play.ELO.sum": { "value": 280842.32241542183, "min": 175471.11833795102, "max": 357606.4086274944, "count": 332 }, "SoccerTwos.Step.mean": { "value": 4999894.0, "min": 1689994.0, "max": 4999894.0, "count": 332 }, "SoccerTwos.Step.sum": { "value": 4999894.0, "min": 1689994.0, "max": 4999894.0, "count": 332 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": 0.0077475616708397865, "min": -0.13937878608703613, "max": 0.18706056475639343, "count": 332 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": 1.355823278427124, "min": -28.7120304107666, "max": 37.97329330444336, "count": 332 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": 0.011240621097385883, "min": -0.1387418657541275, "max": 0.18817657232284546, "count": 332 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": 1.9671087265014648, "min": -28.58082389831543, "max": 38.19984436035156, "count": 332 }, "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 332 }, "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 332 }, "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": -0.024923428807939803, "min": -0.2731864079109673, "max": 0.33552906577810276, "count": 332 }, "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": -4.361600041389465, "min": -56.4196001291275, "max": 68.11240035295486, "count": 332 }, "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": -0.024923428807939803, "min": -0.2731864079109673, "max": 0.33552906577810276, "count": 332 }, "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": -4.361600041389465, "min": -56.4196001291275, "max": 68.11240035295486, "count": 332 }, "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 332 }, "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 332 }, "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.015793424926232546, "min": 0.01054089194512926, "max": 0.02321200983618231, "count": 161 }, "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.015793424926232546, "min": 0.01054089194512926, "max": 0.02321200983618231, "count": 161 }, "SoccerTwos.Losses.ValueLoss.mean": { "value": 0.10713534901539484, "min": 0.07381759186585744, "max": 0.12374988198280334, "count": 161 }, "SoccerTwos.Losses.ValueLoss.sum": { "value": 0.10713534901539484, "min": 0.07381759186585744, "max": 0.12374988198280334, "count": 161 }, "SoccerTwos.Losses.BaselineLoss.mean": { "value": 0.10853615254163743, "min": 0.07539163008332253, "max": 0.1266367405653, "count": 161 }, "SoccerTwos.Losses.BaselineLoss.sum": { "value": 0.10853615254163743, "min": 0.07539163008332253, "max": 0.1266367405653, "count": 161 }, "SoccerTwos.Policy.LearningRate.mean": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 161 }, 
"SoccerTwos.Policy.LearningRate.sum": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 161 }, "SoccerTwos.Policy.Epsilon.mean": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 161 }, "SoccerTwos.Policy.Epsilon.sum": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 161 }, "SoccerTwos.Policy.Beta.mean": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 161 }, "SoccerTwos.Policy.Beta.sum": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 161 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1678642148", "python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]", "command_line_arguments": "C:\\Users\\HQ\\.conda\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.13.1+cpu", "numpy_version": "1.21.2", "end_time_seconds": "1678659092" }, "total": 16943.7443946, "count": 1, "self": 2.328840800000762, "children": { "run_training.setup": { "total": 0.14083170000000012, "count": 1, "self": 0.14083170000000012 }, "TrainerController.start_learning": { "total": 16941.2747221, "count": 1, "self": 7.865852200073277, "children": { "TrainerController._reset_env": { "total": 12.534407000000224, "count": 18, "self": 12.534407000000224 }, "TrainerController.advance": { "total": 16920.549121799926, "count": 231347, "self": 8.1318285000998, "children": { "env_step": { "total": 5273.426564600043, "count": 231347, "self": 3989.9284081008304, "children": { "SubprocessEnvManager._take_step": { "total": 1278.885171499597, "count": 231347, "self": 47.434117300515936, "children": { "TorchPolicy.evaluate": { "total": 1231.451054199081, "count": 416400, "self": 1231.451054199081 } } }, "workers": { "total": 4.612984999615806, "count": 231347, "self": 0.0, "children": { "worker_root": { "total": 16918.35711559972, "count": 231347, "is_parallel": true, "self": 13710.730819499891, "children": { "steps_from_proto": { "total": 0.04045539999365211, "count": 36, "is_parallel": true, "self": 0.009021499999771976, "children": { "_process_rank_one_or_two_observation": { "total": 0.03143389999388013, "count": 144, "is_parallel": true, "self": 0.03143389999388013 } } }, "UnityEnvironment.step": { "total": 3207.585840699834, "count": 231347, "is_parallel": true, "self": 151.8261821994147, "children": { "UnityEnvironment._generate_step_input": { "total": 118.18530329983994, "count": 231347, "is_parallel": true, "self": 118.18530329983994 }, "communicator.exchange": { "total": 2458.776118200445, "count": 231347, "is_parallel": true, "self": 2458.776118200445 }, "steps_from_proto": { "total": 478.79823700013435, "count": 462694, "is_parallel": true, "self": 103.94577940052068, "children": { "_process_rank_one_or_two_observation": { "total": 374.8524575996137, "count": 1850776, "is_parallel": true, "self": 374.8524575996137 } } } } } } } } } } }, "trainer_advance": { "total": 11638.990728699782, "count": 231347, "self": 54.63567629966383, "children": { "process_trajectory": { "total": 1689.3151956001163, "count": 231347, "self": 1687.9599439001204, "children": { "RLTrainer._checkpoint": { "total": 1.3552516999959607, "count": 7, "self": 
1.3552516999959607 } } }, "_update_policy": { "total": 9895.039856800002, "count": 161, "self": 711.2314112000367, "children": { "TorchPOCAOptimizer.update": { "total": 9183.808445599965, "count": 4830, "self": 9183.808445599965 } } } } } } }, "trainer_threads": { "total": 1.2000018614344299e-06, "count": 1, "self": 1.2000018614344299e-06 }, "TrainerController._save_models": { "total": 0.325339899998653, "count": 1, "self": 0.02523500000097556, "children": { "RLTrainer._checkpoint": { "total": 0.3001048999976774, "count": 1, "self": 0.3001048999976774 } } } } } } }