{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6018836498260498,
"min": 1.5453085899353027,
"max": 3.295727014541626,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 32191.453125,
"min": 23595.869140625,
"max": 121191.859375,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 52.68131868131868,
"min": 44.559633027522935,
"max": 999.0,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19176.0,
"min": 14064.0,
"max": 26760.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1597.9941390638924,
"min": 1198.7252374813206,
"max": 1627.111825740658,
"count": 965
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 290834.9333096284,
"min": 2399.7320029860066,
"max": 353036.22238567786,
"count": 965
},
"SoccerTwos.Step.mean": {
"value": 9999924.0,
"min": 9398.0,
"max": 9999924.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 9999924.0,
"min": 9398.0,
"max": 9999924.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.06568997353315353,
"min": -0.11980075389146805,
"max": 0.19735249876976013,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -12.021265029907227,
"min": -17.609134674072266,
"max": 33.94462966918945,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.06923780590295792,
"min": -0.11975289136171341,
"max": 0.20725135505199432,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -12.67051887512207,
"min": -17.75794792175293,
"max": 35.64723205566406,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.11840218826721274,
"min": -0.7072088903850979,
"max": 0.47907618965421406,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -21.667600452899933,
"min": -60.839200139045715,
"max": 76.35919976234436,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.11840218826721274,
"min": -0.7072088903850979,
"max": 0.47907618965421406,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -21.667600452899933,
"min": -60.839200139045715,
"max": 76.35919976234436,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.014544024500840655,
"min": 0.009437778448530783,
"max": 0.02323350603110157,
"count": 480
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.014544024500840655,
"min": 0.009437778448530783,
"max": 0.02323350603110157,
"count": 480
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10801627139250437,
"min": 2.033423588727601e-05,
"max": 0.12143431057532629,
"count": 480
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10801627139250437,
"min": 2.033423588727601e-05,
"max": 0.12143431057532629,
"count": 480
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11004972979426383,
"min": 2.0046720086005127e-05,
"max": 0.12449382642904917,
"count": 480
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11004972979426383,
"min": 2.0046720086005127e-05,
"max": 0.12449382642904917,
"count": 480
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 480
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 480
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 480
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 480
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 480
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 480
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679857173",
"python_version": "3.10.6 (main, Mar 10 2023, 10:55:28) [GCC 11.3.0]",
"command_line_arguments": "/home/felipe/Documents/ML-Studies/HF-DeepRL/rl-course-huggingface/venv/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu113",
"numpy_version": "1.21.2",
"end_time_seconds": "1679866742"
},
"total": 9568.673476891,
"count": 1,
"self": 0.4911048750000191,
"children": {
"run_training.setup": {
"total": 0.016452390998892952,
"count": 1,
"self": 0.016452390998892952
},
"TrainerController.start_learning": {
"total": 9568.165919625,
"count": 1,
"self": 9.20331117276146,
"children": {
"TrainerController._reset_env": {
"total": 3.6472482859935553,
"count": 40,
"self": 3.6472482859935553
},
"TrainerController.advance": {
"total": 9555.147876447245,
"count": 677259,
"self": 9.58158161510437,
"children": {
"env_step": {
"total": 6968.529440571041,
"count": 677259,
"self": 5069.581832214357,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1892.9330405239089,
"count": 677259,
"self": 53.27897592883164,
"children": {
"TorchPolicy.evaluate": {
"total": 1839.6540645950772,
"count": 1260758,
"self": 1839.6540645950772
}
}
},
"workers": {
"total": 6.014567832775356,
"count": 677259,
"self": 0.0,
"children": {
"worker_root": {
"total": 9556.07453426981,
"count": 677259,
"is_parallel": true,
"self": 5494.763372448691,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001390330999129219,
"count": 2,
"is_parallel": true,
"self": 0.0003180130006512627,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010723179984779563,
"count": 8,
"is_parallel": true,
"self": 0.0010723179984779563
}
}
},
"UnityEnvironment.step": {
"total": 0.014358135002112249,
"count": 1,
"is_parallel": true,
"self": 0.0003966290059906896,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002613830001791939,
"count": 1,
"is_parallel": true,
"self": 0.0002613830001791939
},
"communicator.exchange": {
"total": 0.012668479997955728,
"count": 1,
"is_parallel": true,
"self": 0.012668479997955728
},
"steps_from_proto": {
"total": 0.001031642997986637,
"count": 2,
"is_parallel": true,
"self": 0.000214303992834175,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000817339005152462,
"count": 8,
"is_parallel": true,
"self": 0.000817339005152462
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4061.270461069129,
"count": 677258,
"is_parallel": true,
"self": 215.77667014224426,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 151.66650351202406,
"count": 677258,
"is_parallel": true,
"self": 151.66650351202406
},
"communicator.exchange": {
"total": 3029.6624329219558,
"count": 677258,
"is_parallel": true,
"self": 3029.6624329219558
},
"steps_from_proto": {
"total": 664.1648544929049,
"count": 1354516,
"is_parallel": true,
"self": 138.21079235819707,
"children": {
"_process_rank_one_or_two_observation": {
"total": 525.9540621347078,
"count": 5418064,
"is_parallel": true,
"self": 525.9540621347078
}
}
}
}
},
"steps_from_proto": {
"total": 0.04070075198978884,
"count": 78,
"is_parallel": true,
"self": 0.008413323947024764,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.03228742804276408,
"count": 312,
"is_parallel": true,
"self": 0.03228742804276408
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2577.0368542610995,
"count": 677259,
"self": 66.75755179104453,
"children": {
"process_trajectory": {
"total": 895.4636379140866,
"count": 677259,
"self": 891.6356066650915,
"children": {
"RLTrainer._checkpoint": {
"total": 3.828031248995103,
"count": 20,
"self": 3.828031248995103
}
}
},
"_update_policy": {
"total": 1614.8156645559684,
"count": 480,
"self": 938.8587034042539,
"children": {
"TorchPOCAOptimizer.update": {
"total": 675.9569611517145,
"count": 14406,
"self": 675.9569611517145
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0280018614139408e-06,
"count": 1,
"self": 1.0280018614139408e-06
},
"TrainerController._save_models": {
"total": 0.16748269099844038,
"count": 1,
"self": 0.0024151759971573483,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16506751500128303,
"count": 1,
"self": 0.16506751500128303
}
}
}
}
}
}
}