ppo-Pyramids / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.19795969128608704,
"min": 0.17892177402973175,
"max": 0.39160123467445374,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 5948.29296875,
"min": 5373.37890625,
"max": 11716.708984375,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 274.85585585585585,
"min": 229.73643410852713,
"max": 333.5232558139535,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30509.0,
"min": 18321.0,
"max": 31268.0,
"count": 33
},
"Pyramids.Step.mean": {
"value": 1979905.0,
"min": 1019947.0,
"max": 1979905.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 1979905.0,
"min": 1019947.0,
"max": 1979905.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7205009460449219,
"min": 0.5460777878761292,
"max": 0.8055691719055176,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 213.98878479003906,
"min": 99.93223571777344,
"max": 241.67074584960938,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.00788911059498787,
"min": -0.008039699867367744,
"max": 0.0302625373005867,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.3430659770965576,
"min": -2.347592353820801,
"max": 8.503772735595703,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7254553426589285,
"min": 1.590122435546043,
"max": 1.768695984005928,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 193.2509983778,
"min": 91.67799962311983,
"max": 228.1239980906248,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7254553426589285,
"min": 1.590122435546043,
"max": 1.768695984005928,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 193.2509983778,
"min": 91.67799962311983,
"max": 228.1239980906248,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02336898377455197,
"min": 0.020513887662902533,
"max": 0.038077669581980444,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.617326182749821,
"min": 2.132349496590905,
"max": 3.253328613616759,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06737127927325216,
"min": 0.06507654296327513,
"max": 0.07193620100900132,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0105691890987825,
"min": 0.6003042566978063,
"max": 1.0488999902291276,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014879609234410405,
"min": 0.012685737521516988,
"max": 0.016918260468039934,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22319413851615608,
"min": 0.1246029937659235,
"max": 0.23685564655255908,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 5.235348254916669e-06,
"min": 5.235348254916669e-06,
"max": 0.00014841658386115556,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 7.853022382375003e-05,
"min": 7.853022382375003e-05,
"max": 0.0020361141712955497,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10174508333333335,
"min": 0.10174508333333335,
"max": 0.1494721777777778,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5261762500000002,
"min": 1.3452496000000003,
"max": 2.17870445,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000184333825,
"min": 0.000184333825,
"max": 0.00495227056,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.002765007375,
"min": 0.002765007375,
"max": 0.06795257455499999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008348886854946613,
"min": 0.00799898337572813,
"max": 0.01131406333297491,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12523330748081207,
"min": 0.10182657092809677,
"max": 0.15106484293937683,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679762508",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --resume --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679764986"
},
"total": 2477.7979970040014,
"count": 1,
"self": 0.47487297900261183,
"children": {
"run_training.setup": {
"total": 0.1086530889988353,
"count": 1,
"self": 0.1086530889988353
},
"TrainerController.start_learning": {
"total": 2477.214470936,
"count": 1,
"self": 1.3786628168909374,
"children": {
"TrainerController._reset_env": {
"total": 6.55010490299901,
"count": 1,
"self": 6.55010490299901
},
"TrainerController.advance": {
"total": 2469.1941361581084,
"count": 65412,
"self": 1.4882786821872287,
"children": {
"env_step": {
"total": 1837.788205910132,
"count": 65412,
"self": 1730.137402586999,
"children": {
"SubprocessEnvManager._take_step": {
"total": 106.80889161200867,
"count": 65412,
"self": 4.579256114773671,
"children": {
"TorchPolicy.evaluate": {
"total": 102.229635497235,
"count": 62560,
"self": 102.229635497235
}
}
},
"workers": {
"total": 0.8419117111243395,
"count": 65412,
"self": 0.0,
"children": {
"worker_root": {
"total": 2471.83919368398,
"count": 65412,
"is_parallel": true,
"self": 858.5424031159691,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016750790000514826,
"count": 1,
"is_parallel": true,
"self": 0.0005376200006139698,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011374589994375128,
"count": 8,
"is_parallel": true,
"self": 0.0011374589994375128
}
}
},
"UnityEnvironment.step": {
"total": 0.07919898999898578,
"count": 1,
"is_parallel": true,
"self": 0.0004944439988321392,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004383019986562431,
"count": 1,
"is_parallel": true,
"self": 0.0004383019986562431
},
"communicator.exchange": {
"total": 0.07669147300111945,
"count": 1,
"is_parallel": true,
"self": 0.07669147300111945
},
"steps_from_proto": {
"total": 0.0015747710003779503,
"count": 1,
"is_parallel": true,
"self": 0.00037540799712587614,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011993630032520741,
"count": 8,
"is_parallel": true,
"self": 0.0011993630032520741
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1613.2967905680107,
"count": 65411,
"is_parallel": true,
"self": 30.840027512387678,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.224878790815637,
"count": 65411,
"is_parallel": true,
"self": 22.224878790815637
},
"communicator.exchange": {
"total": 1468.603954711847,
"count": 65411,
"is_parallel": true,
"self": 1468.603954711847
},
"steps_from_proto": {
"total": 91.62792955296027,
"count": 65411,
"is_parallel": true,
"self": 19.466922851213894,
"children": {
"_process_rank_one_or_two_observation": {
"total": 72.16100670174637,
"count": 523288,
"is_parallel": true,
"self": 72.16100670174637
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 629.9176515657891,
"count": 65412,
"self": 2.7014179858852003,
"children": {
"process_trajectory": {
"total": 123.35763384293205,
"count": 65412,
"self": 123.14899652393251,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20863731899953564,
"count": 2,
"self": 0.20863731899953564
}
}
},
"_update_policy": {
"total": 503.8585997369719,
"count": 473,
"self": 319.8509355107999,
"children": {
"TorchPPOOptimizer.update": {
"total": 184.00766422617198,
"count": 22749,
"self": 184.00766422617198
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.800016075838357e-07,
"count": 1,
"self": 8.800016075838357e-07
},
"TrainerController._save_models": {
"total": 0.09156617799999367,
"count": 1,
"self": 0.0017570099989825394,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08980916800101113,
"count": 1,
"self": 0.08980916800101113
}
}
}
}
}
}
}
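
For reference, a minimal sketch (assuming Python 3 and a local copy of this file; the `run_logs/timers.json` path and the `walk` helper are illustrative, not part of ML-Agents) of how the hierarchical timer blocks above and the flat `gauges` map can be inspected:

```python
import json

def walk(block, name="root", depth=0):
    """Recursively print each timer node's total seconds and call count."""
    total = block.get("total", 0.0)
    count = block.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.2f}s over {count} call(s)")
    for child_name, child in block.get("children", {}).items():
        walk(child, child_name, depth + 1)

with open("run_logs/timers.json") as f:  # illustrative path
    timers = json.load(f)

# Timer tree: root -> children -> ... (total/count/self per node)
walk(timers, timers.get("name", "root"))

# Gauges are flat: metric name -> {value, min, max, count}
for metric, stats in timers["gauges"].items():
    print(f"{metric}: last={stats['value']:.4f} "
          f"(min={stats['min']:.4f}, max={stats['max']:.4f})")
```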