{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4532043933868408,
"min": 0.44381052255630493,
"max": 0.5803528428077698,
"count": 17
},
"Pyramids.Policy.Entropy.sum": {
"value": 13726.654296875,
"min": 10595.9267578125,
"max": 17317.728515625,
"count": 17
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 432.7313432835821,
"min": 394.775,
"max": 581.1666666666666,
"count": 17
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28993.0,
"min": 15791.0,
"max": 31863.0,
"count": 17
},
"Pyramids.Step.mean": {
"value": 1499986.0,
"min": 1019938.0,
"max": 1499986.0,
"count": 17
},
"Pyramids.Step.sum": {
"value": 1499986.0,
"min": 1019938.0,
"max": 1499986.0,
"count": 17
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.36165475845336914,
"min": 0.2319091558456421,
"max": 0.4280426502227783,
"count": 17
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 96.20016479492188,
"min": 59.83256149291992,
"max": 113.85934448242188,
"count": 17
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.035503603518009186,
"min": -1.390764832496643,
"max": 1.850163459777832,
"count": 17
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 9.443958282470703,
"min": -358.81732177734375,
"max": 475.49200439453125,
"count": 17
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.477692515102785,
"min": 0.9784666348029586,
"max": 1.5552099861204625,
"count": 17
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 99.0053985118866,
"min": 49.901798374950886,
"max": 103.12619817256927,
"count": 17
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.477692515102785,
"min": 0.9784666348029586,
"max": 1.5552099861204625,
"count": 17
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 99.0053985118866,
"min": 49.901798374950886,
"max": 103.12619817256927,
"count": 17
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04428825573735078,
"min": 0.04428825573735078,
"max": 0.07632057223320142,
"count": 17
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.9673131344025023,
"min": 2.0459056455874816,
"max": 4.027543442418391,
"count": 17
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06789491880183397,
"min": 0.06588758243163408,
"max": 0.07212834758557225,
"count": 17
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9505288632256755,
"min": 0.6491551282701503,
"max": 1.0362443234674479,
"count": 17
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016964047471576902,
"min": 0.015326032984085646,
"max": 0.5244140750891595,
"count": 17
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23749666460207663,
"min": 0.21456446177719904,
"max": 7.341797051248233,
"count": 17
},
"Pyramids.Policy.LearningRate.mean": {
"value": 3.0845275432857135e-06,
"min": 3.0845275432857135e-06,
"max": 9.787477848620741e-05,
"count": 17
},
"Pyramids.Policy.LearningRate.sum": {
"value": 4.318338560599999e-05,
"min": 4.318338560599999e-05,
"max": 0.0013020861659715998,
"count": 17
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10102814285714287,
"min": 0.10102814285714287,
"max": 0.1326249037037037,
"count": 17
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4143940000000002,
"min": 1.1936241333333333,
"max": 1.9048849333333338,
"count": 17
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00011271147142857142,
"min": 0.00011271147142857142,
"max": 0.00326922788,
"count": 17
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0015779605999999998,
"min": 0.0015779605999999998,
"max": 0.04349943716,
"count": 17
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009777999483048916,
"min": 0.009777999483048916,
"max": 0.01282537542283535,
"count": 17
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13689199090003967,
"min": 0.11162860691547394,
"max": 0.18299421668052673,
"count": 17
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 17
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 17
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674394967",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674396060"
},
"total": 1092.667596873,
"count": 1,
"self": 0.43018394000000626,
"children": {
"run_training.setup": {
"total": 0.10236797300012768,
"count": 1,
"self": 0.10236797300012768
},
"TrainerController.start_learning": {
"total": 1092.13504496,
"count": 1,
"self": 0.6659482690456571,
"children": {
"TrainerController._reset_env": {
"total": 6.009644183999626,
"count": 1,
"self": 6.009644183999626
},
"TrainerController.advance": {
"total": 1085.3672708049548,
"count": 32135,
"self": 0.6634570369433277,
"children": {
"env_step": {
"total": 759.7270263679825,
"count": 32135,
"self": 706.0505475659379,
"children": {
"SubprocessEnvManager._take_step": {
"total": 53.269509911006026,
"count": 32135,
"self": 2.2648675340533373,
"children": {
"TorchPolicy.evaluate": {
"total": 51.00464237695269,
"count": 31326,
"self": 17.25952298097218,
"children": {
"TorchPolicy.sample_actions": {
"total": 33.74511939598051,
"count": 31326,
"self": 33.74511939598051
}
}
}
}
},
"workers": {
"total": 0.4069688910385594,
"count": 32135,
"self": 0.0,
"children": {
"worker_root": {
"total": 1089.611901268005,
"count": 32135,
"is_parallel": true,
"self": 434.65522339204836,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018921700002465514,
"count": 1,
"is_parallel": true,
"self": 0.000711918999968475,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011802510002780764,
"count": 8,
"is_parallel": true,
"self": 0.0011802510002780764
}
}
},
"UnityEnvironment.step": {
"total": 0.048748384000191436,
"count": 1,
"is_parallel": true,
"self": 0.0005123490004734776,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00054005700030757,
"count": 1,
"is_parallel": true,
"self": 0.00054005700030757
},
"communicator.exchange": {
"total": 0.045948587999646406,
"count": 1,
"is_parallel": true,
"self": 0.045948587999646406
},
"steps_from_proto": {
"total": 0.001747389999763982,
"count": 1,
"is_parallel": true,
"self": 0.00042254599975422025,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013248440000097617,
"count": 8,
"is_parallel": true,
"self": 0.0013248440000097617
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 654.9566778759568,
"count": 32134,
"is_parallel": true,
"self": 13.927581371995984,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 11.930569120969267,
"count": 32134,
"is_parallel": true,
"self": 11.930569120969267
},
"communicator.exchange": {
"total": 576.0525423230297,
"count": 32134,
"is_parallel": true,
"self": 576.0525423230297
},
"steps_from_proto": {
"total": 53.04598505996182,
"count": 32134,
"is_parallel": true,
"self": 11.768339568886404,
"children": {
"_process_rank_one_or_two_observation": {
"total": 41.27764549107542,
"count": 257072,
"is_parallel": true,
"self": 41.27764549107542
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 324.976787400029,
"count": 32135,
"self": 1.2968600550302654,
"children": {
"process_trajectory": {
"total": 75.45606778799629,
"count": 32135,
"self": 75.3409848169963,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11508297099999254,
"count": 1,
"self": 0.11508297099999254
}
}
},
"_update_policy": {
"total": 248.22385955700247,
"count": 235,
"self": 92.66320894699629,
"children": {
"TorchPPOOptimizer.update": {
"total": 155.56065061000618,
"count": 11313,
"self": 155.56065061000618
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.289997251471505e-07,
"count": 1,
"self": 9.289997251471505e-07
},
"TrainerController._save_models": {
"total": 0.09218077300010918,
"count": 1,
"self": 0.001816039000004821,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09036473400010436,
"count": 1,
"self": 0.09036473400010436
}
}
}
}
}
}
}