ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4080092906951904,
"min": 1.4080092906951904,
"max": 1.4261890649795532,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69499.3359375,
"min": 68796.609375,
"max": 75182.71875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 92.25515947467167,
"min": 79.85463258785943,
"max": 417.94166666666666,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49172.0,
"min": 48748.0,
"max": 50153.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999954.0,
"min": 49672.0,
"max": 1999954.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999954.0,
"min": 49672.0,
"max": 1999954.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.412260055541992,
"min": 0.15314890444278717,
"max": 2.5013792514801025,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1285.734619140625,
"min": 18.224720001220703,
"max": 1509.40771484375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.64449921714432,
"min": 1.9191868007183075,
"max": 3.9513858913310935,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1942.5180827379227,
"min": 228.3832292854786,
"max": 2355.9419588446617,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.64449921714432,
"min": 1.9191868007183075,
"max": 3.9513858913310935,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1942.5180827379227,
"min": 228.3832292854786,
"max": 2355.9419588446617,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016052775053281544,
"min": 0.01256645431179398,
"max": 0.020202466641138825,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04815832515984463,
"min": 0.02513290862358796,
"max": 0.060607399923416475,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0542769325690137,
"min": 0.021305372348676126,
"max": 0.05934852005706893,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1628307977070411,
"min": 0.04261074469735225,
"max": 0.1780455601712068,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.1524489492166694e-06,
"min": 3.1524489492166694e-06,
"max": 0.00029529607656797496,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.457346847650008e-06,
"min": 9.457346847650008e-06,
"max": 0.0008437675687441501,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10105078333333334,
"min": 0.10105078333333334,
"max": 0.19843202499999996,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30315235,
"min": 0.20726119999999998,
"max": 0.5812558499999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.243408833333344e-05,
"min": 6.243408833333344e-05,
"max": 0.0049217580475000005,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001873022650000003,
"min": 0.0001873022650000003,
"max": 0.014064666915,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690876189",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690878207"
},
"total": 2018.11215666,
"count": 1,
"self": 0.32133798900008514,
"children": {
"run_training.setup": {
"total": 0.030149504000064553,
"count": 1,
"self": 0.030149504000064553
},
"TrainerController.start_learning": {
"total": 2017.7606691669998,
"count": 1,
"self": 4.435523593009748,
"children": {
"TrainerController._reset_env": {
"total": 3.984107535000021,
"count": 1,
"self": 3.984107535000021
},
"TrainerController.advance": {
"total": 2009.2298031999899,
"count": 232603,
"self": 4.0820816178295445,
"children": {
"env_step": {
"total": 1540.4284629750525,
"count": 232603,
"self": 1262.8435318140646,
"children": {
"SubprocessEnvManager._take_step": {
"total": 274.7329533289569,
"count": 232603,
"self": 15.421734441865055,
"children": {
"TorchPolicy.evaluate": {
"total": 259.31121888709185,
"count": 222947,
"self": 259.31121888709185
}
}
},
"workers": {
"total": 2.851977832031025,
"count": 232603,
"self": 0.0,
"children": {
"worker_root": {
"total": 2010.1337813680516,
"count": 232603,
"is_parallel": true,
"self": 991.8063660719785,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0006958729999269053,
"count": 1,
"is_parallel": true,
"self": 0.00022289399987585057,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00047297900005105475,
"count": 2,
"is_parallel": true,
"self": 0.00047297900005105475
}
}
},
"UnityEnvironment.step": {
"total": 0.020615986999928282,
"count": 1,
"is_parallel": true,
"self": 0.00017257199999676232,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00016575999995893653,
"count": 1,
"is_parallel": true,
"self": 0.00016575999995893653
},
"communicator.exchange": {
"total": 0.01982149099990238,
"count": 1,
"is_parallel": true,
"self": 0.01982149099990238
},
"steps_from_proto": {
"total": 0.00045616400007020275,
"count": 1,
"is_parallel": true,
"self": 0.00014299300016773486,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0003131709999024679,
"count": 2,
"is_parallel": true,
"self": 0.0003131709999024679
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1018.3274152960731,
"count": 232602,
"is_parallel": true,
"self": 27.853549682048424,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 49.927217540883476,
"count": 232602,
"is_parallel": true,
"self": 49.927217540883476
},
"communicator.exchange": {
"total": 873.3815051310497,
"count": 232602,
"is_parallel": true,
"self": 873.3815051310497
},
"steps_from_proto": {
"total": 67.1651429420915,
"count": 232602,
"is_parallel": true,
"self": 26.06378599499783,
"children": {
"_process_rank_one_or_two_observation": {
"total": 41.10135694709368,
"count": 465204,
"is_parallel": true,
"self": 41.10135694709368
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 464.7192586071078,
"count": 232603,
"self": 6.302841353162535,
"children": {
"process_trajectory": {
"total": 126.3997085359473,
"count": 232603,
"self": 125.21612775294727,
"children": {
"RLTrainer._checkpoint": {
"total": 1.183580783000025,
"count": 10,
"self": 1.183580783000025
}
}
},
"_update_policy": {
"total": 332.016708717998,
"count": 97,
"self": 287.89861463399996,
"children": {
"TorchPPOOptimizer.update": {
"total": 44.11809408399802,
"count": 2910,
"self": 44.11809408399802
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0150001799047459e-06,
"count": 1,
"self": 1.0150001799047459e-06
},
"TrainerController._save_models": {
"total": 0.11123382400000992,
"count": 1,
"self": 0.0019994520002910576,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10923437199971886,
"count": 1,
"self": 0.10923437199971886
}
}
}
}
}
}
}
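
For reference, a minimal Python sketch of how the data above can be read back. It uses only the standard-library json module; the relative path "run_logs/timers.json" and the printed summary layout are illustrative assumptions, not part of ML-Agents itself.

# Load the timers.json written by mlagents-learn and summarize it.
import json

# Assumed path: adjust to wherever the run directory lives.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each entry under "gauges" records the latest value plus the
# min/max/count observed over the run (here, 40 summary points).
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# The nested total/self/children blocks form the wall-clock timer tree.
print("total wall-clock seconds:", timers["total"])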