{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2819635272026062,
"min": 0.27301838994026184,
"max": 1.4733399152755737,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8418.302734375,
"min": 8168.7099609375,
"max": 44695.23828125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989948.0,
"min": 29952.0,
"max": 989948.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989948.0,
"min": 29952.0,
"max": 989948.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6652859449386597,
"min": -0.10367186367511749,
"max": 0.7337735295295715,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 194.26348876953125,
"min": -24.98491859436035,
"max": 212.79432678222656,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0029677217826247215,
"min": -0.02047145366668701,
"max": 0.3420800566673279,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.866574764251709,
"min": -5.732007026672363,
"max": 81.0729751586914,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06887569674305934,
"min": 0.06424872283919261,
"max": 0.07292385820623971,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0331354511458901,
"min": 0.5046665744763001,
"max": 1.0346645842655562,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015557455142969084,
"min": 0.0018776939496129717,
"max": 0.01714905900319406,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23336182714453627,
"min": 0.013143857647290803,
"max": 0.24008682604471687,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.538977487040002e-06,
"min": 7.538977487040002e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011308466230560003,
"min": 0.00011308466230560003,
"max": 0.0036356851881049995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251296,
"min": 0.10251296,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5376944,
"min": 1.3691136000000002,
"max": 2.6623572999999996,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002610447040000001,
"min": 0.0002610447040000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003915670560000002,
"min": 0.003915670560000002,
"max": 0.12120831050000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013166411779820919,
"min": 0.013156048953533173,
"max": 0.4454827904701233,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1974961757659912,
"min": 0.18418468534946442,
"max": 3.118379592895508,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 276.88990825688074,
"min": 265.3302752293578,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30181.0,
"min": 15984.0,
"max": 33014.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6680476900634416,
"min": -1.0000000521540642,
"max": 1.7262547031607267,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 181.81719821691513,
"min": -30.383001655340195,
"max": 191.6413984745741,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6680476900634416,
"min": -1.0000000521540642,
"max": 1.7262547031607267,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 181.81719821691513,
"min": -30.383001655340195,
"max": 191.6413984745741,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03752557851145591,
"min": 0.036573676126532846,
"max": 8.073556842282414,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.090288057748694,
"min": 4.023104373918613,
"max": 129.17690947651863,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676566192",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676568662"
},
"total": 2470.560556863,
"count": 1,
"self": 1.3569095980001293,
"children": {
"run_training.setup": {
"total": 0.1062906289998864,
"count": 1,
"self": 0.1062906289998864
},
"TrainerController.start_learning": {
"total": 2469.097356636,
"count": 1,
"self": 1.4675131910162236,
"children": {
"TrainerController._reset_env": {
"total": 6.051501912000276,
"count": 1,
"self": 6.051501912000276
},
"TrainerController.advance": {
"total": 2461.4231611699843,
"count": 64358,
"self": 1.5106177609477527,
"children": {
"env_step": {
"total": 1702.116108076024,
"count": 64358,
"self": 1581.3736658663574,
"children": {
"SubprocessEnvManager._take_step": {
"total": 119.85511575875444,
"count": 64358,
"self": 4.894395975962652,
"children": {
"TorchPolicy.evaluate": {
"total": 114.96071978279178,
"count": 62555,
"self": 38.84136722373114,
"children": {
"TorchPolicy.sample_actions": {
"total": 76.11935255906064,
"count": 62555,
"self": 76.11935255906064
}
}
}
}
},
"workers": {
"total": 0.8873264509120418,
"count": 64358,
"self": 0.0,
"children": {
"worker_root": {
"total": 2463.1873500470238,
"count": 64358,
"is_parallel": true,
"self": 1006.6586270919852,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023580259994560038,
"count": 1,
"is_parallel": true,
"self": 0.0007844909996492788,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001573534999806725,
"count": 8,
"is_parallel": true,
"self": 0.001573534999806725
}
}
},
"UnityEnvironment.step": {
"total": 0.08238333600002079,
"count": 1,
"is_parallel": true,
"self": 0.0005865940001967829,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004927700001644553,
"count": 1,
"is_parallel": true,
"self": 0.0004927700001644553
},
"communicator.exchange": {
"total": 0.07897664699976303,
"count": 1,
"is_parallel": true,
"self": 0.07897664699976303
},
"steps_from_proto": {
"total": 0.0023273249998965184,
"count": 1,
"is_parallel": true,
"self": 0.0009477009989495855,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001379624000946933,
"count": 8,
"is_parallel": true,
"self": 0.001379624000946933
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1456.5287229550386,
"count": 64357,
"is_parallel": true,
"self": 32.67080193088441,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.81137991999185,
"count": 64357,
"is_parallel": true,
"self": 24.81137991999185
},
"communicator.exchange": {
"total": 1288.642678797005,
"count": 64357,
"is_parallel": true,
"self": 1288.642678797005
},
"steps_from_proto": {
"total": 110.4038623071574,
"count": 64357,
"is_parallel": true,
"self": 25.0547896241942,
"children": {
"_process_rank_one_or_two_observation": {
"total": 85.3490726829632,
"count": 514856,
"is_parallel": true,
"self": 85.3490726829632
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 757.7964353330126,
"count": 64358,
"self": 2.681436157070493,
"children": {
"process_trajectory": {
"total": 176.46812352693814,
"count": 64358,
"self": 176.21663474693833,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2514887799998178,
"count": 2,
"self": 0.2514887799998178
}
}
},
"_update_policy": {
"total": 578.646875649004,
"count": 454,
"self": 223.76716579194544,
"children": {
"TorchPPOOptimizer.update": {
"total": 354.8797098570585,
"count": 22800,
"self": 354.8797098570585
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4559991541318595e-06,
"count": 1,
"self": 1.4559991541318595e-06
},
"TrainerController._save_models": {
"total": 0.15517890699993586,
"count": 1,
"self": 0.002144877999853634,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15303402900008223,
"count": 1,
"self": 0.15303402900008223
}
}
}
}
}
}
}