{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0273736715316772,
"min": 1.0273736715316772,
"max": 2.8761963844299316,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9809.36328125,
"min": 9809.36328125,
"max": 29486.765625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.962425231933594,
"min": 0.2534676492214203,
"max": 12.962425231933594,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2527.6728515625,
"min": 49.172725677490234,
"max": 2606.60791015625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07032245017206365,
"min": 0.06528018078967637,
"max": 0.07327674707333905,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2812898006882546,
"min": 0.2645933747168302,
"max": 0.3663837353666952,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1970665380358696,
"min": 0.10647535229708963,
"max": 0.28842396216065275,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7882661521434784,
"min": 0.4259014091883585,
"max": 1.416002262748924,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.613636363636363,
"min": 3.0454545454545454,
"max": 25.613636363636363,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1127.0,
"min": 134.0,
"max": 1370.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.613636363636363,
"min": 3.0454545454545454,
"max": 25.613636363636363,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1127.0,
"min": 134.0,
"max": 1370.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678907245",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678907714"
},
"total": 469.286838568,
"count": 1,
"self": 0.43509517500001493,
"children": {
"run_training.setup": {
"total": 0.18169876199999635,
"count": 1,
"self": 0.18169876199999635
},
"TrainerController.start_learning": {
"total": 468.670044631,
"count": 1,
"self": 0.5750117690001275,
"children": {
"TrainerController._reset_env": {
"total": 8.44552257700002,
"count": 1,
"self": 8.44552257700002
},
"TrainerController.advance": {
"total": 459.51334908099983,
"count": 18201,
"self": 0.28581167000641017,
"children": {
"env_step": {
"total": 459.2275374109934,
"count": 18201,
"self": 330.7172023179776,
"children": {
"SubprocessEnvManager._take_step": {
"total": 128.23284132900193,
"count": 18201,
"self": 2.068288839991226,
"children": {
"TorchPolicy.evaluate": {
"total": 126.1645524890107,
"count": 18201,
"self": 126.1645524890107
}
}
},
"workers": {
"total": 0.27749376401391146,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 467.2351463169953,
"count": 18201,
"is_parallel": true,
"self": 221.35111302199533,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0058084780000058345,
"count": 1,
"is_parallel": true,
"self": 0.004199190999997882,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016092870000079529,
"count": 10,
"is_parallel": true,
"self": 0.0016092870000079529
}
}
},
"UnityEnvironment.step": {
"total": 0.051360861000034674,
"count": 1,
"is_parallel": true,
"self": 0.0037873570000215295,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003993999999920561,
"count": 1,
"is_parallel": true,
"self": 0.0003993999999920561
},
"communicator.exchange": {
"total": 0.04521996900001568,
"count": 1,
"is_parallel": true,
"self": 0.04521996900001568
},
"steps_from_proto": {
"total": 0.001954135000005408,
"count": 1,
"is_parallel": true,
"self": 0.0003967609999904198,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015573740000149883,
"count": 10,
"is_parallel": true,
"self": 0.0015573740000149883
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 245.884033295,
"count": 18200,
"is_parallel": true,
"self": 9.706542587005572,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.37257240100007,
"count": 18200,
"is_parallel": true,
"self": 5.37257240100007
},
"communicator.exchange": {
"total": 198.27650285699696,
"count": 18200,
"is_parallel": true,
"self": 198.27650285699696
},
"steps_from_proto": {
"total": 32.5284154499974,
"count": 18200,
"is_parallel": true,
"self": 6.506366868979796,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.0220485810176,
"count": 182000,
"is_parallel": true,
"self": 26.0220485810176
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00012082699993243295,
"count": 1,
"self": 0.00012082699993243295,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 456.14905428200603,
"count": 407369,
"is_parallel": true,
"self": 10.040476226018143,
"children": {
"process_trajectory": {
"total": 254.18717188598754,
"count": 407369,
"is_parallel": true,
"self": 252.95841491298762,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2287569729999177,
"count": 4,
"is_parallel": true,
"self": 1.2287569729999177
}
}
},
"_update_policy": {
"total": 191.92140617000035,
"count": 90,
"is_parallel": true,
"self": 70.0075620379991,
"children": {
"TorchPPOOptimizer.update": {
"total": 121.91384413200126,
"count": 4587,
"is_parallel": true,
"self": 121.91384413200126
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13604037700008575,
"count": 1,
"self": 0.0009082220001346286,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13513215499995113,
"count": 1,
"self": 0.13513215499995113
}
}
}
}
}
}
}