{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.38108545541763306,
"min": 0.38108545541763306,
"max": 1.4502646923065186,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11456.953125,
"min": 11456.953125,
"max": 43995.23046875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989885.0,
"min": 29952.0,
"max": 989885.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989885.0,
"min": 29952.0,
"max": 989885.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.580700159072876,
"min": -0.09279569983482361,
"max": 0.6002334356307983,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 161.4346466064453,
"min": -22.270967483520508,
"max": 167.4651336669922,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.00018589460523799062,
"min": -0.00018589460523799062,
"max": 0.47792017459869385,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.05167870223522186,
"min": -0.05167870223522186,
"max": 113.26708221435547,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06829555472617763,
"min": 0.0642029486905772,
"max": 0.07347868600932304,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0244333208926644,
"min": 0.5019391628300729,
"max": 1.0483960632506448,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01591133770675191,
"min": 0.0006323193366627158,
"max": 0.016170097332395102,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23867006560127868,
"min": 0.008852470713278021,
"max": 0.23867006560127868,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.439137520319998e-06,
"min": 7.439137520319998e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011158706280479998,
"min": 0.00011158706280479998,
"max": 0.0036347128884290994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247968,
"min": 0.10247968,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5371952,
"min": 1.3886848,
"max": 2.6115709000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025772003199999995,
"min": 0.00025772003199999995,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003865800479999999,
"min": 0.003865800479999999,
"max": 0.12117593291,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013026198372244835,
"min": 0.013026198372244835,
"max": 0.5194559693336487,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19539298117160797,
"min": 0.19026552140712738,
"max": 3.6361918449401855,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 311.88775510204084,
"min": 301.6,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30565.0,
"min": 15984.0,
"max": 33224.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6064550874336643,
"min": -1.0000000521540642,
"max": 1.6983999757303132,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 157.4325985684991,
"min": -29.75680171698332,
"max": 158.77399790287018,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6064550874336643,
"min": -1.0000000521540642,
"max": 1.6983999757303132,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 157.4325985684991,
"min": -29.75680171698332,
"max": 158.77399790287018,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04174828156472507,
"min": 0.04174828156472507,
"max": 11.16864543221891,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.091331593343057,
"min": 4.064758014224935,
"max": 178.69832691550255,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1738859358",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1738861606"
},
"total": 2247.350028364,
"count": 1,
"self": 0.4896409330003735,
"children": {
"run_training.setup": {
"total": 0.022402275999979793,
"count": 1,
"self": 0.022402275999979793
},
"TrainerController.start_learning": {
"total": 2246.837985155,
"count": 1,
"self": 1.3803306059949136,
"children": {
"TrainerController._reset_env": {
"total": 3.361281510000026,
"count": 1,
"self": 3.361281510000026
},
"TrainerController.advance": {
"total": 2242.008870151005,
"count": 64043,
"self": 1.3749469710523954,
"children": {
"env_step": {
"total": 1555.3213305939996,
"count": 64043,
"self": 1402.0470170680085,
"children": {
"SubprocessEnvManager._take_step": {
"total": 152.48256537701627,
"count": 64043,
"self": 4.5754033099874505,
"children": {
"TorchPolicy.evaluate": {
"total": 147.90716206702882,
"count": 62560,
"self": 147.90716206702882
}
}
},
"workers": {
"total": 0.7917481489748752,
"count": 64043,
"self": 0.0,
"children": {
"worker_root": {
"total": 2241.7443339250644,
"count": 64043,
"is_parallel": true,
"self": 952.6183083360897,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006015026999989459,
"count": 1,
"is_parallel": true,
"self": 0.004565960999798335,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014490660001911237,
"count": 8,
"is_parallel": true,
"self": 0.0014490660001911237
}
}
},
"UnityEnvironment.step": {
"total": 0.11013070299998162,
"count": 1,
"is_parallel": true,
"self": 0.0005583490000162783,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004463889999897219,
"count": 1,
"is_parallel": true,
"self": 0.0004463889999897219
},
"communicator.exchange": {
"total": 0.10224656599996251,
"count": 1,
"is_parallel": true,
"self": 0.10224656599996251
},
"steps_from_proto": {
"total": 0.006879399000013109,
"count": 1,
"is_parallel": true,
"self": 0.005585525999890706,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001293873000122403,
"count": 8,
"is_parallel": true,
"self": 0.001293873000122403
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1289.1260255889747,
"count": 64042,
"is_parallel": true,
"self": 32.030437510974934,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.859823214023436,
"count": 64042,
"is_parallel": true,
"self": 22.859823214023436
},
"communicator.exchange": {
"total": 1140.9690591179813,
"count": 64042,
"is_parallel": true,
"self": 1140.9690591179813
},
"steps_from_proto": {
"total": 93.26670574599507,
"count": 64042,
"is_parallel": true,
"self": 18.532343053978934,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.73436269201613,
"count": 512336,
"is_parallel": true,
"self": 74.73436269201613
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 685.312592585953,
"count": 64043,
"self": 2.7147078639774236,
"children": {
"process_trajectory": {
"total": 127.77151527897615,
"count": 64043,
"self": 127.52097887297612,
"children": {
"RLTrainer._checkpoint": {
"total": 0.25053640600003746,
"count": 2,
"self": 0.25053640600003746
}
}
},
"_update_policy": {
"total": 554.8263694429994,
"count": 455,
"self": 305.1522465550005,
"children": {
"TorchPPOOptimizer.update": {
"total": 249.67412288799892,
"count": 22803,
"self": 249.67412288799892
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.66999778029276e-07,
"count": 1,
"self": 9.66999778029276e-07
},
"TrainerController._save_models": {
"total": 0.08750192100023924,
"count": 1,
"self": 0.0014732660001754994,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08602865500006374,
"count": 1,
"self": 0.08602865500006374
}
}
}
}
}
}
}