{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.8240864872932434,
"min": 0.8112577795982361,
"max": 1.4814174175262451,
"count": 16
},
"Pyramids.Policy.Entropy.sum": {
"value": 24696.224609375,
"min": 24467.53515625,
"max": 44940.27734375,
"count": 16
},
"Pyramids.Step.mean": {
"value": 479976.0,
"min": 29952.0,
"max": 479976.0,
"count": 16
},
"Pyramids.Step.sum": {
"value": 479976.0,
"min": 29952.0,
"max": 479976.0,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.14563710987567902,
"min": -0.09152958542108536,
"max": 0.14563710987567902,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 36.263641357421875,
"min": -21.967100143432617,
"max": 36.263641357421875,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.058837827295064926,
"min": 0.02931838296353817,
"max": 0.40576598048210144,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 14.650618553161621,
"min": 7.241640567779541,
"max": 96.16653442382812,
"count": 16
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06551292970675796,
"min": 0.06551292970675796,
"max": 0.0724271701955688,
"count": 16
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9171810158946113,
"min": 0.48936093182687046,
"max": 1.0029008920132139,
"count": 16
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.009125758486782612,
"min": 0.0007770561829373332,
"max": 0.009781751784906926,
"count": 16
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.12776061881495657,
"min": 0.007770561829373332,
"max": 0.13694452498869697,
"count": 16
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.0743850228271425e-05,
"min": 2.0743850228271425e-05,
"max": 0.00029030126037577137,
"count": 16
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00029041390319579994,
"min": 0.00029041390319579994,
"max": 0.0028154356615216003,
"count": 16
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1069145857142857,
"min": 0.1069145857142857,
"max": 0.19676708571428575,
"count": 16
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4968042,
"min": 1.3382272,
"max": 2.3384784000000005,
"count": 16
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0007007671128571428,
"min": 0.0007007671128571428,
"max": 0.00967703186285714,
"count": 16
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00981073958,
"min": 0.00981073958,
"max": 0.09389399216,
"count": 16
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.02078682743012905,
"min": 0.02078682743012905,
"max": 0.47662487626075745,
"count": 16
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2910155951976776,
"min": 0.2910155951976776,
"max": 3.336374044418335,
"count": 16
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 725.1395348837209,
"min": 695.3636363636364,
"max": 999.0,
"count": 16
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31181.0,
"min": 15984.0,
"max": 32672.0,
"count": 16
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.5769627530907475,
"min": -1.0000000521540642,
"max": 0.5771090549162843,
"count": 16
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 24.809398382902145,
"min": -32.000001668930054,
"max": 25.39279841631651,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.5769627530907475,
"min": -1.0000000521540642,
"max": 0.5771090549162843,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 24.809398382902145,
"min": -32.000001668930054,
"max": 25.39279841631651,
"count": 16
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.15822068458358043,
"min": 0.15822068458358043,
"max": 10.198477046564221,
"count": 16
},
"Pyramids.Policy.RndReward.sum": {
"value": 6.803489437093958,
"min": 6.428280530730262,
"max": 163.17563274502754,
"count": 16
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1657432745",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1657433776"
},
"total": 1030.7415311660002,
"count": 1,
"self": 0.4772998510002253,
"children": {
"run_training.setup": {
"total": 0.04340227100010452,
"count": 1,
"self": 0.04340227100010452
},
"TrainerController.start_learning": {
"total": 1030.2208290439999,
"count": 1,
"self": 0.6730740399914339,
"children": {
"TrainerController._reset_env": {
"total": 10.226432853999995,
"count": 1,
"self": 10.226432853999995
},
"TrainerController.advance": {
"total": 1019.2264794710088,
"count": 31649,
"self": 0.719046518056075,
"children": {
"env_step": {
"total": 656.2663542909729,
"count": 31649,
"self": 602.9678051790122,
"children": {
"SubprocessEnvManager._take_step": {
"total": 52.933573677975346,
"count": 31649,
"self": 2.3392168470313663,
"children": {
"TorchPolicy.evaluate": {
"total": 50.59435683094398,
"count": 31308,
"self": 17.57771765294251,
"children": {
"TorchPolicy.sample_actions": {
"total": 33.01663917800147,
"count": 31308,
"self": 33.01663917800147
}
}
}
}
},
"workers": {
"total": 0.36497543398536436,
"count": 31649,
"self": 0.0,
"children": {
"worker_root": {
"total": 1028.136700980038,
"count": 31649,
"is_parallel": true,
"self": 476.09514795201403,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005482437999944523,
"count": 1,
"is_parallel": true,
"self": 0.004173504000391404,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001308933999553119,
"count": 8,
"is_parallel": true,
"self": 0.001308933999553119
}
}
},
"UnityEnvironment.step": {
"total": 0.04943937300004109,
"count": 1,
"is_parallel": true,
"self": 0.0005826089998208772,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047796900003049814,
"count": 1,
"is_parallel": true,
"self": 0.00047796900003049814
},
"communicator.exchange": {
"total": 0.04668553900000916,
"count": 1,
"is_parallel": true,
"self": 0.04668553900000916
},
"steps_from_proto": {
"total": 0.0016932560001805541,
"count": 1,
"is_parallel": true,
"self": 0.00043974999994134123,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012535060002392129,
"count": 8,
"is_parallel": true,
"self": 0.0012535060002392129
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 552.041553028024,
"count": 31648,
"is_parallel": true,
"self": 14.455718548986624,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 11.97533555800942,
"count": 31648,
"is_parallel": true,
"self": 11.97533555800942
},
"communicator.exchange": {
"total": 477.8063062820247,
"count": 31648,
"is_parallel": true,
"self": 477.8063062820247
},
"steps_from_proto": {
"total": 47.804192639003304,
"count": 31648,
"is_parallel": true,
"self": 11.781612921938631,
"children": {
"_process_rank_one_or_two_observation": {
"total": 36.02257971706467,
"count": 253184,
"is_parallel": true,
"self": 36.02257971706467
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 362.2410786619798,
"count": 31649,
"self": 1.20137732094895,
"children": {
"process_trajectory": {
"total": 81.8424588180269,
"count": 31649,
"self": 81.73584209602723,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10661672199967143,
"count": 1,
"self": 0.10661672199967143
}
}
},
"_update_policy": {
"total": 279.197242523004,
"count": 210,
"self": 109.81181349898179,
"children": {
"TorchPPOOptimizer.update": {
"total": 169.3854290240222,
"count": 11430,
"self": 169.3854290240222
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0909998309216462e-06,
"count": 1,
"self": 1.0909998309216462e-06
},
"TrainerController._save_models": {
"total": 0.09484158799978104,
"count": 1,
"self": 0.0015464620000784635,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09329512599970258,
"count": 1,
"self": 0.09329512599970258
}
}
}
}
}
}
}