{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.407299280166626,
"min": 1.4072964191436768,
"max": 1.427763819694519,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70563.390625,
"min": 68770.34375,
"max": 77930.921875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 76.31424148606811,
"min": 76.31424148606811,
"max": 409.5772357723577,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49299.0,
"min": 49268.0,
"max": 50378.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999988.0,
"min": 49850.0,
"max": 1999988.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999988.0,
"min": 49850.0,
"max": 1999988.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4843177795410156,
"min": 0.21782329678535461,
"max": 2.488217353820801,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1604.8692626953125,
"min": 26.57444190979004,
"max": 1604.8692626953125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9509673758926036,
"min": 1.7931902792121543,
"max": 3.971829412454204,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2552.324924826622,
"min": 218.76921406388283,
"max": 2552.324924826622,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9509673758926036,
"min": 1.7931902792121543,
"max": 3.971829412454204,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2552.324924826622,
"min": 218.76921406388283,
"max": 2552.324924826622,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01524119201995846,
"min": 0.013519736984097917,
"max": 0.021313062674986817,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.045723576059875384,
"min": 0.027906211858498866,
"max": 0.056250810675555846,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.061009211341540026,
"min": 0.018988142783443133,
"max": 0.06261371974315909,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18302763402462008,
"min": 0.037976285566886266,
"max": 0.18784115922947725,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5506988164666695e-06,
"min": 3.5506988164666695e-06,
"max": 0.00029534820155059996,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0652096449400008e-05,
"min": 1.0652096449400008e-05,
"max": 0.0008442499685833496,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10118353333333334,
"min": 0.10118353333333334,
"max": 0.1984494,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3035506,
"min": 0.20749555000000003,
"max": 0.5814166500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.90583133333334e-05,
"min": 6.90583133333334e-05,
"max": 0.00492262506,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002071749400000002,
"min": 0.0002071749400000002,
"max": 0.014072690835,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693114799",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1693117460"
},
"total": 2660.822761142,
"count": 1,
"self": 0.4883301710001433,
"children": {
"run_training.setup": {
"total": 0.05589252200002193,
"count": 1,
"self": 0.05589252200002193
},
"TrainerController.start_learning": {
"total": 2660.278538449,
"count": 1,
"self": 5.274760529906871,
"children": {
"TrainerController._reset_env": {
"total": 4.256645673000094,
"count": 1,
"self": 4.256645673000094
},
"TrainerController.advance": {
"total": 2650.6215512800927,
"count": 232862,
"self": 5.168265947904729,
"children": {
"env_step": {
"total": 2063.0527183160934,
"count": 232862,
"self": 1742.342376774319,
"children": {
"SubprocessEnvManager._take_step": {
"total": 317.33456474991954,
"count": 232862,
"self": 18.764080316982245,
"children": {
"TorchPolicy.evaluate": {
"total": 298.5704844329373,
"count": 222939,
"self": 298.5704844329373
}
}
},
"workers": {
"total": 3.3757767918548325,
"count": 232862,
"self": 0.0,
"children": {
"worker_root": {
"total": 2651.8165967079403,
"count": 232862,
"is_parallel": true,
"self": 1230.0813775502372,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.000923929999999018,
"count": 1,
"is_parallel": true,
"self": 0.00024971499988168944,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006742150001173286,
"count": 2,
"is_parallel": true,
"self": 0.0006742150001173286
}
}
},
"UnityEnvironment.step": {
"total": 0.030045697000105065,
"count": 1,
"is_parallel": true,
"self": 0.0003255249998801446,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000216759000068123,
"count": 1,
"is_parallel": true,
"self": 0.000216759000068123
},
"communicator.exchange": {
"total": 0.02874523900004533,
"count": 1,
"is_parallel": true,
"self": 0.02874523900004533
},
"steps_from_proto": {
"total": 0.0007581740001114667,
"count": 1,
"is_parallel": true,
"self": 0.00022522500012200908,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005329489999894577,
"count": 2,
"is_parallel": true,
"self": 0.0005329489999894577
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1421.735219157703,
"count": 232861,
"is_parallel": true,
"self": 43.11391021566965,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.99633929099991,
"count": 232861,
"is_parallel": true,
"self": 83.99633929099991
},
"communicator.exchange": {
"total": 1188.2586101280592,
"count": 232861,
"is_parallel": true,
"self": 1188.2586101280592
},
"steps_from_proto": {
"total": 106.36635952297434,
"count": 232861,
"is_parallel": true,
"self": 37.379982568867945,
"children": {
"_process_rank_one_or_two_observation": {
"total": 68.98637695410639,
"count": 465722,
"is_parallel": true,
"self": 68.98637695410639
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 582.4005670160948,
"count": 232862,
"self": 7.89055671708843,
"children": {
"process_trajectory": {
"total": 147.90492898900288,
"count": 232862,
"self": 146.45672851900235,
"children": {
"RLTrainer._checkpoint": {
"total": 1.448200470000529,
"count": 10,
"self": 1.448200470000529
}
}
},
"_update_policy": {
"total": 426.60508131000347,
"count": 97,
"self": 362.6528046800029,
"children": {
"TorchPPOOptimizer.update": {
"total": 63.9522766300006,
"count": 2910,
"self": 63.9522766300006
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.680002222012263e-07,
"count": 1,
"self": 9.680002222012263e-07
},
"TrainerController._save_models": {
"total": 0.12557999799992103,
"count": 1,
"self": 0.002273506000165071,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12330649199975596,
"count": 1,
"self": 0.12330649199975596
}
}
}
}
}
}
}