ppo-pyramidernd/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.8291571140289307,
"min": 0.8291571140289307,
"max": 1.429126501083374,
"count": 3
},
"Pyramids.Policy.Entropy.sum": {
"value": 24834.9140625,
"min": 24834.9140625,
"max": 43353.98046875,
"count": 3
},
"Pyramids.Step.mean": {
"value": 89877.0,
"min": 29987.0,
"max": 89877.0,
"count": 3
},
"Pyramids.Step.sum": {
"value": 89877.0,
"min": 29987.0,
"max": 89877.0,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.034973643720149994,
"min": -0.034973643720149994,
"max": 0.2029203176498413,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -8.428647994995117,
"min": -8.428647994995117,
"max": 48.497955322265625,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.16937725245952606,
"min": 0.16937725245952606,
"max": 0.5154863595962524,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 40.81991958618164,
"min": 40.81991958618164,
"max": 123.20123291015625,
"count": 3
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0645663019805308,
"min": 0.0645663019805308,
"max": 0.06989178973381485,
"count": 3
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.8393619257469005,
"min": 0.46641888840949625,
"max": 0.8393619257469005,
"count": 3
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0015360831726496929,
"min": 0.0015360831726496929,
"max": 0.006238869652745727,
"count": 3
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.019969081244446008,
"min": 0.019969081244446008,
"max": 0.043672087569220086,
"count": 3
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.632999763361537e-05,
"min": 7.632999763361537e-05,
"max": 0.0002515063018788571,
"count": 3
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0009922899692369998,
"min": 0.0009922899692369998,
"max": 0.0018497614834129998,
"count": 3
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.12544330769230772,
"min": 0.12544330769230772,
"max": 0.1838354285714286,
"count": 3
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.6307630000000004,
"min": 1.2868480000000002,
"max": 1.716587,
"count": 3
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0025517864384615387,
"min": 0.0025517864384615387,
"max": 0.008385159314285713,
"count": 3
},
"Pyramids.Policy.Beta.sum": {
"value": 0.033173223700000004,
"min": 0.033173223700000004,
"max": 0.06170704130000001,
"count": 3
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.11156826466321945,
"min": 0.11156826466321945,
"max": 0.4491786062717438,
"count": 3
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 1.4503874778747559,
"min": 1.4503874778747559,
"max": 3.1442501544952393,
"count": 3
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 987.25,
"min": 975.1666666666666,
"max": 989.258064516129,
"count": 3
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31592.0,
"min": 17553.0,
"max": 31592.0,
"count": 3
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.8631500541232526,
"min": -0.925703278712688,
"max": -0.7538333833217621,
"count": 3
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -27.620801731944084,
"min": -28.696801640093327,
"max": -13.569000899791718,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.8631500541232526,
"min": -0.925703278712688,
"max": -0.7538333833217621,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -27.620801731944084,
"min": -28.696801640093327,
"max": -13.569000899791718,
"count": 3
},
"Pyramids.Policy.RndReward.mean": {
"value": 1.3217137991450727,
"min": 1.3217137991450727,
"max": 8.747148085178601,
"count": 3
},
"Pyramids.Policy.RndReward.sum": {
"value": 42.294841572642326,
"min": 42.294841572642326,
"max": 157.4486655332148,
"count": 3
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1700232466",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1700232667"
},
"total": 200.870019856,
"count": 1,
"self": 0.47795789099996,
"children": {
"run_training.setup": {
"total": 0.04618172200002846,
"count": 1,
"self": 0.04618172200002846
},
"TrainerController.start_learning": {
"total": 200.345880243,
"count": 1,
"self": 0.1280172359918197,
"children": {
"TrainerController._reset_env": {
"total": 3.4935932840000987,
"count": 1,
"self": 3.4935932840000987
},
"TrainerController.advance": {
"total": 196.63957389100824,
"count": 6313,
"self": 0.14096415300991794,
"children": {
"env_step": {
"total": 131.96104628998933,
"count": 6313,
"self": 118.08794314098964,
"children": {
"SubprocessEnvManager._take_step": {
"total": 13.793599730006463,
"count": 6313,
"self": 0.4615043740072906,
"children": {
"TorchPolicy.evaluate": {
"total": 13.332095355999172,
"count": 6302,
"self": 13.332095355999172
}
}
},
"workers": {
"total": 0.07950341899322666,
"count": 6313,
"self": 0.0,
"children": {
"worker_root": {
"total": 199.8954041300026,
"count": 6313,
"is_parallel": true,
"self": 93.32963902700044,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001967113000091558,
"count": 1,
"is_parallel": true,
"self": 0.0006219740000688034,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013451390000227548,
"count": 8,
"is_parallel": true,
"self": 0.0013451390000227548
}
}
},
"UnityEnvironment.step": {
"total": 0.047242957999969803,
"count": 1,
"is_parallel": true,
"self": 0.0006110190001891169,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048110900002029666,
"count": 1,
"is_parallel": true,
"self": 0.00048110900002029666
},
"communicator.exchange": {
"total": 0.044373499999892374,
"count": 1,
"is_parallel": true,
"self": 0.044373499999892374
},
"steps_from_proto": {
"total": 0.0017773299998680159,
"count": 1,
"is_parallel": true,
"self": 0.0003583789998629072,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014189510000051087,
"count": 8,
"is_parallel": true,
"self": 0.0014189510000051087
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 106.56576510300215,
"count": 6312,
"is_parallel": true,
"self": 3.46827088200871,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.531903731005059,
"count": 6312,
"is_parallel": true,
"self": 2.531903731005059
},
"communicator.exchange": {
"total": 90.53364449599326,
"count": 6312,
"is_parallel": true,
"self": 90.53364449599326
},
"steps_from_proto": {
"total": 10.031945993995123,
"count": 6312,
"is_parallel": true,
"self": 2.023131589973218,
"children": {
"_process_rank_one_or_two_observation": {
"total": 8.008814404021905,
"count": 50496,
"is_parallel": true,
"self": 8.008814404021905
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 64.537563448009,
"count": 6313,
"self": 0.17225007000274672,
"children": {
"process_trajectory": {
"total": 11.783132653006305,
"count": 6313,
"self": 11.783132653006305
},
"_update_policy": {
"total": 52.582180724999944,
"count": 35,
"self": 31.66753413499805,
"children": {
"TorchPPOOptimizer.update": {
"total": 20.914646590001894,
"count": 2277,
"self": 20.914646590001894
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1119998362119077e-06,
"count": 1,
"self": 1.1119998362119077e-06
},
"TrainerController._save_models": {
"total": 0.08469472000001588,
"count": 1,
"self": 0.0012389270000312536,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08345579299998462,
"count": 1,
"self": 0.08345579299998462
}
}
}
}
}
}
}
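
The JSON above is the timing and metrics log that ML-Agents writes at the end of a run. Below is a minimal sketch of how one might inspect it, assuming the file is saved locally as "timers.json"; the walk() helper is illustrative, not part of the ML-Agents API. Each entry under "gauges" appears to hold the latest recorded value plus the min/max observed across `count` summary writes, while the timer block is a tree in which every node records wall-clock `total` seconds, a call `count`, and `self` time (total minus time spent in children).

import json

# Load the log; adjust the path to wherever the run_logs directory lives.
with open("timers.json") as f:
    log = json.load(f)

# Print a compact table of the training gauges.
print(f"{'gauge':<50} {'value':>12} {'min':>12} {'max':>12}")
for name, g in log["gauges"].items():
    print(f"{name:<50} {g['value']:>12.4f} {g['min']:>12.4f} {g['max']:>12.4f}")

# Recursively walk the timer tree, indenting children under their parent.
def walk(node, name="root", depth=0):
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.2f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(log)

Run against this file, the tree printout would show, for example, that of the ~200 s total, roughly 132 s went to env_step (dominated by communicator.exchange) and about 53 s to _update_policy.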