{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.529485821723938,
"min": 0.4993588626384735,
"max": 1.4616461992263794,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15926.93359375,
"min": 15036.6943359375,
"max": 44340.5,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989889.0,
"min": 29952.0,
"max": 989889.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989889.0,
"min": 29952.0,
"max": 989889.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4258022606372833,
"min": -0.11910755932331085,
"max": 0.4258022606372833,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 114.11500549316406,
"min": -28.70492172241211,
"max": 114.11500549316406,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.012714996002614498,
"min": -0.007144283503293991,
"max": 0.4556998014450073,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.407618999481201,
"min": -1.857513666152954,
"max": 108.0008544921875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06884660100103264,
"min": 0.06531196481124166,
"max": 0.0740321751673597,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.963852414014457,
"min": 0.49166428222305586,
"max": 1.037727091414055,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015690361774994974,
"min": 0.00019108918636427351,
"max": 0.016808480010601087,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21966506484992962,
"min": 0.0022930702363712823,
"max": 0.2353187201484152,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.644204594821426e-06,
"min": 7.644204594821426e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010701886432749996,
"min": 0.00010701886432749996,
"max": 0.0029056149314618,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254803571428571,
"min": 0.10254803571428571,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356725,
"min": 1.3691136000000002,
"max": 2.3600056,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026454876785714275,
"min": 0.00026454876785714275,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037036827499999985,
"min": 0.0037036827499999985,
"max": 0.09688696617999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012615271843969822,
"min": 0.012615271843969822,
"max": 0.5443219542503357,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17661380767822266,
"min": 0.17661380767822266,
"max": 3.810253620147705,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 408.5,
"min": 408.5,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28595.0,
"min": 15984.0,
"max": 32738.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5343285558479174,
"min": -1.0000000521540642,
"max": 1.5343285558479174,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 107.40299890935421,
"min": -32.000001668930054,
"max": 107.40299890935421,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5343285558479174,
"min": -1.0000000521540642,
"max": 1.5343285558479174,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 107.40299890935421,
"min": -32.000001668930054,
"max": 107.40299890935421,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05344877265535095,
"min": 0.05344877265535095,
"max": 11.804602368734777,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.741414085874567,
"min": 3.741414085874567,
"max": 188.87363789975643,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680000092",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680002389"
},
"total": 2297.487206656,
"count": 1,
"self": 0.48180778799996915,
"children": {
"run_training.setup": {
"total": 0.16622535200008315,
"count": 1,
"self": 0.16622535200008315
},
"TrainerController.start_learning": {
"total": 2296.839173516,
"count": 1,
"self": 1.627811249982642,
"children": {
"TrainerController._reset_env": {
"total": 7.029436982000107,
"count": 1,
"self": 7.029436982000107
},
"TrainerController.advance": {
"total": 2288.082484776017,
"count": 63444,
"self": 1.8137734400579575,
"children": {
"env_step": {
"total": 1621.8420421009735,
"count": 63444,
"self": 1493.7257289928943,
"children": {
"SubprocessEnvManager._take_step": {
"total": 127.10540279005272,
"count": 63444,
"self": 5.42739394002092,
"children": {
"TorchPolicy.evaluate": {
"total": 121.6780088500318,
"count": 62550,
"self": 121.6780088500318
}
}
},
"workers": {
"total": 1.01091031802639,
"count": 63444,
"self": 0.0,
"children": {
"worker_root": {
"total": 2290.780259015002,
"count": 63444,
"is_parallel": true,
"self": 929.3792146330138,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0028713919998608617,
"count": 1,
"is_parallel": true,
"self": 0.0009419079995041102,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019294840003567515,
"count": 8,
"is_parallel": true,
"self": 0.0019294840003567515
}
}
},
"UnityEnvironment.step": {
"total": 0.048371620000125404,
"count": 1,
"is_parallel": true,
"self": 0.0005384570004025591,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005225019999670621,
"count": 1,
"is_parallel": true,
"self": 0.0005225019999670621
},
"communicator.exchange": {
"total": 0.0454949149998356,
"count": 1,
"is_parallel": true,
"self": 0.0454949149998356
},
"steps_from_proto": {
"total": 0.001815745999920182,
"count": 1,
"is_parallel": true,
"self": 0.0004092859996944753,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014064600002257066,
"count": 8,
"is_parallel": true,
"self": 0.0014064600002257066
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1361.401044381988,
"count": 63443,
"is_parallel": true,
"self": 33.86037188601222,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.819979234004677,
"count": 63443,
"is_parallel": true,
"self": 25.819979234004677
},
"communicator.exchange": {
"total": 1197.5908966179738,
"count": 63443,
"is_parallel": true,
"self": 1197.5908966179738
},
"steps_from_proto": {
"total": 104.12979664399722,
"count": 63443,
"is_parallel": true,
"self": 23.011589422063707,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.11820722193352,
"count": 507544,
"is_parallel": true,
"self": 81.11820722193352
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 664.4266692349854,
"count": 63444,
"self": 2.8843581470086974,
"children": {
"process_trajectory": {
"total": 131.28870298897573,
"count": 63444,
"self": 131.0153982869756,
"children": {
"RLTrainer._checkpoint": {
"total": 0.27330470200013224,
"count": 2,
"self": 0.27330470200013224
}
}
},
"_update_policy": {
"total": 530.253608099001,
"count": 434,
"self": 336.2344478450418,
"children": {
"TorchPPOOptimizer.update": {
"total": 194.01916025395917,
"count": 22860,
"self": 194.01916025395917
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.890000001178123e-07,
"count": 1,
"self": 9.890000001178123e-07
},
"TrainerController._save_models": {
"total": 0.09943951900004322,
"count": 1,
"self": 0.0019331210000927967,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09750639799995042,
"count": 1,
"self": 0.09750639799995042
}
}
}
}
}
}
}