{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.292717933654785,
"min": 2.2663424015045166,
"max": 3.2957677841186523,
"count": 566
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 44607.12109375,
"min": 10051.4208984375,
"max": 142803.0625,
"count": 566
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 50.618556701030926,
"min": 43.97345132743363,
"max": 999.0,
"count": 566
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19640.0,
"min": 15212.0,
"max": 24248.0,
"count": 566
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1486.4148442009343,
"min": 1179.706223024584,
"max": 1486.4148442009343,
"count": 501
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 288364.4797749813,
"min": 2360.146349147463,
"max": 328342.7055349428,
"count": 501
},
"SoccerTwos.Step.mean": {
"value": 5659962.0,
"min": 9672.0,
"max": 5659962.0,
"count": 566
},
"SoccerTwos.Step.sum": {
"value": 5659962.0,
"min": 9672.0,
"max": 5659962.0,
"count": 566
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.039656296372413635,
"min": -0.12947434186935425,
"max": 0.1382981240749359,
"count": 566
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 7.693321228027344,
"min": -18.660633087158203,
"max": 21.66172218322754,
"count": 566
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.039549946784973145,
"min": -0.12532289326190948,
"max": 0.1415165513753891,
"count": 566
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 7.672689437866211,
"min": -17.545204162597656,
"max": 21.361865997314453,
"count": 566
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 566
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 566
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.1206061864636608,
"min": -0.5965846135066106,
"max": 0.4601599991321564,
"count": 566
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 23.397600173950195,
"min": -47.004000186920166,
"max": 66.19439995288849,
"count": 566
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.1206061864636608,
"min": -0.5965846135066106,
"max": 0.4601599991321564,
"count": 566
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 23.397600173950195,
"min": -47.004000186920166,
"max": 66.19439995288849,
"count": 566
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 566
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 566
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.022239530365914105,
"min": 0.010096545951091685,
"max": 0.023152435064548626,
"count": 267
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.022239530365914105,
"min": 0.010096545951091685,
"max": 0.023152435064548626,
"count": 267
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09612657402952512,
"min": 6.308880415417662e-07,
"max": 0.10640911161899566,
"count": 267
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09612657402952512,
"min": 6.308880415417662e-07,
"max": 0.10640911161899566,
"count": 267
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09836790338158607,
"min": 7.920101258681218e-07,
"max": 0.10937313909331957,
"count": 267
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09836790338158607,
"min": 7.920101258681218e-07,
"max": 0.10937313909331957,
"count": 267
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 267
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 267
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 267
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 267
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 267
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 267
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677417398",
"python_version": "3.9.16 (main, Jan 11 2023, 10:02:19) \n[Clang 14.0.6 ]",
"command_line_arguments": "/opt/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.app --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1677439701"
},
"total": 22303.072364749998,
"count": 1,
"self": 0.32343374899573973,
"children": {
"run_training.setup": {
"total": 0.2731806670000001,
"count": 1,
"self": 0.2731806670000001
},
"TrainerController.start_learning": {
"total": 22302.475750334,
"count": 1,
"self": 4.137907545617054,
"children": {
"TrainerController._reset_env": {
"total": 4.081350543002182,
"count": 29,
"self": 4.081350543002182
},
"TrainerController.advance": {
"total": 22294.11792557938,
"count": 374061,
"self": 4.102707851754531,
"children": {
"env_step": {
"total": 17869.848999263504,
"count": 374061,
"self": 17274.55872279339,
"children": {
"SubprocessEnvManager._take_step": {
"total": 592.2230997697567,
"count": 374061,
"self": 18.98845215279391,
"children": {
"TorchPolicy.evaluate": {
"total": 573.2346476169628,
"count": 724964,
"self": 573.2346476169628
}
}
},
"workers": {
"total": 3.067176700355809,
"count": 374060,
"self": 0.0,
"children": {
"worker_root": {
"total": 22292.01485277676,
"count": 374060,
"is_parallel": true,
"self": 5553.071921290801,
"children": {
"steps_from_proto": {
"total": 0.045345873008321824,
"count": 58,
"is_parallel": true,
"self": 0.005311340010161203,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.04003453299816062,
"count": 232,
"is_parallel": true,
"self": 0.04003453299816062
}
}
},
"UnityEnvironment.step": {
"total": 16738.89758561295,
"count": 374060,
"is_parallel": true,
"self": 47.77328840074915,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 298.2748947990431,
"count": 374060,
"is_parallel": true,
"self": 298.2748947990431
},
"communicator.exchange": {
"total": 15737.70776385374,
"count": 374060,
"is_parallel": true,
"self": 15737.70776385374
},
"steps_from_proto": {
"total": 655.1416385594167,
"count": 748120,
"is_parallel": true,
"self": 70.73070426205845,
"children": {
"_process_rank_one_or_two_observation": {
"total": 584.4109342973583,
"count": 2992480,
"is_parallel": true,
"self": 584.4109342973583
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 4420.166218464119,
"count": 374060,
"self": 42.170455885034244,
"children": {
"process_trajectory": {
"total": 658.6330152480891,
"count": 374060,
"self": 657.0412877480867,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5917275000024347,
"count": 11,
"self": 1.5917275000024347
}
}
},
"_update_policy": {
"total": 3719.362747330996,
"count": 267,
"self": 454.0781210390619,
"children": {
"TorchPOCAOptimizer.update": {
"total": 3265.2846262919343,
"count": 8010,
"self": 3265.2846262919343
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.830006557516754e-07,
"count": 1,
"self": 5.830006557516754e-07
},
"TrainerController._save_models": {
"total": 0.1385660830019333,
"count": 1,
"self": 0.0009357080016343389,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13763037500029895,
"count": 1,
"self": 0.13763037500029895
}
}
}
}
}
}
}