ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.402511715888977,
"min": 1.402511715888977,
"max": 1.4275639057159424,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68995.1640625,
"min": 68548.1484375,
"max": 77317.90625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 86.5569176882662,
"min": 78.58505564387917,
"max": 404.86290322580646,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49424.0,
"min": 48990.0,
"max": 50203.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999957.0,
"min": 49624.0,
"max": 1999957.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999957.0,
"min": 49624.0,
"max": 1999957.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.428128719329834,
"min": 0.06758305430412292,
"max": 2.4724278450012207,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1386.4615478515625,
"min": 8.312715530395508,
"max": 1511.0390625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6901333710984465,
"min": 1.6461194567079467,
"max": 4.0158667642272725,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2107.066154897213,
"min": 202.47269317507744,
"max": 2398.2472546100616,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6901333710984465,
"min": 1.6461194567079467,
"max": 4.0158667642272725,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2107.066154897213,
"min": 202.47269317507744,
"max": 2398.2472546100616,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01862225971158801,
"min": 0.013761348595532278,
"max": 0.01952023539472268,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.055866779134764025,
"min": 0.027522697191064555,
"max": 0.05856070618416803,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05947692609495587,
"min": 0.020138865523040294,
"max": 0.06290626993609799,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1784307782848676,
"min": 0.04027773104608059,
"max": 0.18871880980829397,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6212487929499946e-06,
"min": 3.6212487929499946e-06,
"max": 0.00029528280157240005,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0863746378849983e-05,
"min": 1.0863746378849983e-05,
"max": 0.000844263018579,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10120705,
"min": 0.10120705,
"max": 0.19842760000000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30362115,
"min": 0.20754789999999995,
"max": 0.581421,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.023179499999993e-05,
"min": 7.023179499999993e-05,
"max": 0.00492153724,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021069538499999976,
"min": 0.00021069538499999976,
"max": 0.0140729079,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679746482",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679749181"
},
"total": 2699.0899418970002,
"count": 1,
"self": 0.4833864709999034,
"children": {
"run_training.setup": {
"total": 0.11070473500001299,
"count": 1,
"self": 0.11070473500001299
},
"TrainerController.start_learning": {
"total": 2698.495850691,
"count": 1,
"self": 5.529625376957483,
"children": {
"TrainerController._reset_env": {
"total": 8.084042201000017,
"count": 1,
"self": 8.084042201000017
},
"TrainerController.advance": {
"total": 2684.7497910980424,
"count": 232554,
"self": 6.222745681976903,
"children": {
"env_step": {
"total": 2115.780198326051,
"count": 232554,
"self": 1780.6626306581788,
"children": {
"SubprocessEnvManager._take_step": {
"total": 331.5119251779793,
"count": 232554,
"self": 19.989117635975163,
"children": {
"TorchPolicy.evaluate": {
"total": 311.52280754200416,
"count": 222940,
"self": 311.52280754200416
}
}
},
"workers": {
"total": 3.6056424898929436,
"count": 232554,
"self": 0.0,
"children": {
"worker_root": {
"total": 2689.2425931820353,
"count": 232554,
"is_parallel": true,
"self": 1249.2155606970596,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010834169999895948,
"count": 1,
"is_parallel": true,
"self": 0.0003056489999835321,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007777680000060627,
"count": 2,
"is_parallel": true,
"self": 0.0007777680000060627
}
}
},
"UnityEnvironment.step": {
"total": 0.02988824400000567,
"count": 1,
"is_parallel": true,
"self": 0.0002993189999926926,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021591800000919648,
"count": 1,
"is_parallel": true,
"self": 0.00021591800000919648
},
"communicator.exchange": {
"total": 0.02865066600000432,
"count": 1,
"is_parallel": true,
"self": 0.02865066600000432
},
"steps_from_proto": {
"total": 0.0007223409999994601,
"count": 1,
"is_parallel": true,
"self": 0.00021892300000558862,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005034179999938715,
"count": 2,
"is_parallel": true,
"self": 0.0005034179999938715
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1440.0270324849757,
"count": 232553,
"is_parallel": true,
"self": 41.70935312188112,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 84.95461216403851,
"count": 232553,
"is_parallel": true,
"self": 84.95461216403851
},
"communicator.exchange": {
"total": 1213.2597293460412,
"count": 232553,
"is_parallel": true,
"self": 1213.2597293460412
},
"steps_from_proto": {
"total": 100.1033378530148,
"count": 232553,
"is_parallel": true,
"self": 39.24987472000768,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.853463133007125,
"count": 465106,
"is_parallel": true,
"self": 60.853463133007125
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 562.7468470900146,
"count": 232554,
"self": 8.679827762991522,
"children": {
"process_trajectory": {
"total": 165.01453262002133,
"count": 232554,
"self": 163.46847629602075,
"children": {
"RLTrainer._checkpoint": {
"total": 1.546056324000574,
"count": 10,
"self": 1.546056324000574
}
}
},
"_update_policy": {
"total": 389.0524867070018,
"count": 97,
"self": 327.2221348540047,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.83035185299707,
"count": 2910,
"self": 61.83035185299707
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.091000285668997e-06,
"count": 1,
"self": 1.091000285668997e-06
},
"TrainerController._save_models": {
"total": 0.13239092399999208,
"count": 1,
"self": 0.0023690259999966656,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13002189799999542,
"count": 1,
"self": 0.13002189799999542
}
}
}
}
}
}
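
The JSON above is the ML-Agents timer dump written at the end of this Huggy PPO run: `gauges` holds value/min/max/count summaries for each logged metric, `metadata` records the training environment, and the nested `total`/`count`/`self`/`children` entries form a wall-clock profile of the trainer. Below is a minimal sketch of how such a file could be inspected offline; the local path and the chosen print format are assumptions for illustration, not anything shipped with this repository.

```python
import json

# Assumed local path to the dump shown above (adjust as needed).
PATH = "run_logs/timers.json"

with open(PATH) as f:
    timers = json.load(f)

# Top-level gauges: per-metric summaries keyed by name.
for name, gauge in timers["gauges"].items():
    print(
        f"{name}: value={gauge['value']} "
        f"min={gauge['min']} max={gauge['max']} count={gauge['count']}"
    )

# Walk the timer tree and report where wall-clock time was spent.
def walk(node, name="root", depth=0):
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: total={total:.2f}s count={count}")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)
```

Run against this dump, the tree walk would show, for example, that `communicator.exchange` under `UnityEnvironment.step` accounts for roughly 1213 of the ~2699 seconds of total training time.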