{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2017671912908554,
"min": 0.1956200748682022,
"max": 0.3468891680240631,
"count": 17
},
"Pyramids.Policy.Entropy.sum": {
"value": 6001.36328125,
"min": 5887.3818359375,
"max": 10467.7275390625,
"count": 17
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 238.6315789473684,
"min": 232.609375,
"max": 322.23333333333335,
"count": 17
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27204.0,
"min": 18453.0,
"max": 32423.0,
"count": 17
},
"Pyramids.Step.mean": {
"value": 1499984.0,
"min": 1019951.0,
"max": 1499984.0,
"count": 17
},
"Pyramids.Step.sum": {
"value": 1499984.0,
"min": 1019951.0,
"max": 1499984.0,
"count": 17
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7601009607315063,
"min": 0.6408032178878784,
"max": 0.7900497913360596,
"count": 17
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 126.93685913085938,
"min": 79.10871887207031,
"max": 138.2587127685547,
"count": 17
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.017158646136522293,
"min": 0.014697253704071045,
"max": 0.019293153658509254,
"count": 17
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.8654937744140625,
"min": 2.180126428604126,
"max": 3.207824230194092,
"count": 17
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.747333312243746,
"min": 1.5924821979469723,
"max": 1.7672868003678877,
"count": 17
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 199.19599759578705,
"min": 121.72819799184799,
"max": 227.9799972474575,
"count": 17
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.747333312243746,
"min": 1.5924821979469723,
"max": 1.7672868003678877,
"count": 17
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 199.19599759578705,
"min": 121.72819799184799,
"max": 227.9799972474575,
"count": 17
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.035691248115510554,
"min": 0.035691248115510554,
"max": 0.05215429211080925,
"count": 17
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.0688022851682035,
"min": 3.314780578162754,
"max": 5.48682291759178,
"count": 17
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06969928937371957,
"min": 0.06422369384687167,
"max": 0.07258907158189605,
"count": 17
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.975790051232074,
"min": 0.5649106561759034,
"max": 1.0162470021465446,
"count": 17
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015370686575162213,
"min": 0.012745629658228627,
"max": 0.016346135751743075,
"count": 17
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21518961205227097,
"min": 0.11912501468425013,
"max": 0.22884590052440307,
"count": 17
},
"Pyramids.Policy.LearningRate.mean": {
"value": 4.928670442857142e-06,
"min": 4.928670442857142e-06,
"max": 0.0001629690257395417,
"count": 17
},
"Pyramids.Policy.LearningRate.sum": {
"value": 6.90013862e-05,
"min": 6.90013862e-05,
"max": 0.0021713462990642666,
"count": 17
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10098571428571432,
"min": 0.10098571428571432,
"max": 0.13259379166666665,
"count": 17
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4138000000000004,
"min": 1.0607503333333332,
"max": 1.8342690666666668,
"count": 17
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00010847285714285718,
"min": 0.00010847285714285718,
"max": 0.0032661197875,
"count": 17
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0015186200000000005,
"min": 0.0015186200000000005,
"max": 0.043523479760000004,
"count": 17
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01440468244254589,
"min": 0.01424525398761034,
"max": 0.01760612055659294,
"count": 17
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.20166555047035217,
"min": 0.13917449116706848,
"max": 0.24648568034172058,
"count": 17
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 17
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 17
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1731687124",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --resume --no-graphics --torch-device cuda:0 --no-graphics --debug",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1731688410"
},
"total": 1286.1273987120003,
"count": 1,
"self": 0.48131996800111665,
"children": {
"run_training.setup": {
"total": 0.07746751900049276,
"count": 1,
"self": 0.07746751900049276
},
"TrainerController.start_learning": {
"total": 1285.5686112249987,
"count": 1,
"self": 0.6796937353956309,
"children": {
"TrainerController._reset_env": {
"total": 2.9247552739998355,
"count": 1,
"self": 2.9247552739998355
},
"TrainerController.advance": {
"total": 1281.8517803436025,
"count": 32811,
"self": 0.7368759017263073,
"children": {
"env_step": {
"total": 924.4362790809355,
"count": 32811,
"self": 838.0878667697652,
"children": {
"SubprocessEnvManager._take_step": {
"total": 85.93083537592793,
"count": 32811,
"self": 2.341578512063279,
"children": {
"TorchPolicy.evaluate": {
"total": 83.58925686386465,
"count": 31340,
"self": 83.58925686386465
}
}
},
"workers": {
"total": 0.4175769352423231,
"count": 32811,
"self": 0.0,
"children": {
"worker_root": {
"total": 1282.916554070889,
"count": 32811,
"is_parallel": true,
"self": 503.96048706200054,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002794324000205961,
"count": 1,
"is_parallel": true,
"self": 0.0007855459953134414,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020087780048925197,
"count": 8,
"is_parallel": true,
"self": 0.0020087780048925197
}
}
},
"UnityEnvironment.step": {
"total": 0.059342993999962346,
"count": 1,
"is_parallel": true,
"self": 0.0006536159999086522,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004605960002663778,
"count": 1,
"is_parallel": true,
"self": 0.0004605960002663778
},
"communicator.exchange": {
"total": 0.05661947800035705,
"count": 1,
"is_parallel": true,
"self": 0.05661947800035705
},
"steps_from_proto": {
"total": 0.0016093039994302671,
"count": 1,
"is_parallel": true,
"self": 0.0003353039992362028,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012740000001940643,
"count": 8,
"is_parallel": true,
"self": 0.0012740000001940643
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 778.9560670088886,
"count": 32810,
"is_parallel": true,
"self": 16.734177104233822,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 11.052866471871312,
"count": 32810,
"is_parallel": true,
"self": 11.052866471871312
},
"communicator.exchange": {
"total": 702.7962260299992,
"count": 32810,
"is_parallel": true,
"self": 702.7962260299992
},
"steps_from_proto": {
"total": 48.37279740278427,
"count": 32810,
"is_parallel": true,
"self": 9.710107740927924,
"children": {
"_process_rank_one_or_two_observation": {
"total": 38.66268966185635,
"count": 262480,
"is_parallel": true,
"self": 38.66268966185635
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 356.6786253609407,
"count": 32811,
"self": 1.5444829939115152,
"children": {
"process_trajectory": {
"total": 70.74342205103676,
"count": 32811,
"self": 70.58841375103657,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1550083000001905,
"count": 1,
"self": 0.1550083000001905
}
}
},
"_update_policy": {
"total": 284.39072031599244,
"count": 231,
"self": 154.880350986974,
"children": {
"TorchPPOOptimizer.update": {
"total": 129.51036932901843,
"count": 11382,
"self": 129.51036932901843
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.970011211000383e-07,
"count": 1,
"self": 7.970011211000383e-07
},
"TrainerController._save_models": {
"total": 0.1123810749995755,
"count": 1,
"self": 0.0019194329997844761,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11046164199979103,
"count": 1,
"self": 0.11046164199979103
}
}
}
}
}
}
}