ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.407550573348999,
"min": 1.407550573348999,
"max": 1.428611159324646,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71488.0859375,
"min": 69080.6953125,
"max": 77182.703125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 83.66440677966102,
"min": 78.64193548387097,
"max": 387.0077519379845,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49362.0,
"min": 48739.0,
"max": 50073.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999926.0,
"min": 49303.0,
"max": 1999926.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999926.0,
"min": 49303.0,
"max": 1999926.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4860761165618896,
"min": 0.1523505002260208,
"max": 2.513928174972534,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1466.784912109375,
"min": 19.500864028930664,
"max": 1532.2078857421875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.896628303952136,
"min": 1.7316654763417318,
"max": 4.078883149545558,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2299.0106993317604,
"min": 221.65318097174168,
"max": 2392.5368205308914,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.896628303952136,
"min": 1.7316654763417318,
"max": 4.078883149545558,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2299.0106993317604,
"min": 221.65318097174168,
"max": 2392.5368205308914,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01717438139215422,
"min": 0.012967907763656208,
"max": 0.02018370325887291,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.051523144176462664,
"min": 0.025935815527312417,
"max": 0.06055110977661873,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06302236210968759,
"min": 0.021789403259754182,
"max": 0.06302236210968759,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18906708632906277,
"min": 0.043578806519508365,
"max": 0.18906708632906277,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4444988518666697e-06,
"min": 3.4444988518666697e-06,
"max": 0.000295286326571225,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0333496555600009e-05,
"min": 1.0333496555600009e-05,
"max": 0.0008441161686279502,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10114813333333335,
"min": 0.10114813333333335,
"max": 0.198428775,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30344440000000006,
"min": 0.20747689999999996,
"max": 0.58137205,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.72918533333334e-05,
"min": 6.72918533333334e-05,
"max": 0.0049215958725,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020187556000000022,
"min": 0.00020187556000000022,
"max": 0.014070465295000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693000607",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1693003258"
},
"total": 2650.887270763,
"count": 1,
"self": 0.4486288679995596,
"children": {
"run_training.setup": {
"total": 0.04171390800001973,
"count": 1,
"self": 0.04171390800001973
},
"TrainerController.start_learning": {
"total": 2650.3969279870003,
"count": 1,
"self": 4.81166919614725,
"children": {
"TrainerController._reset_env": {
"total": 5.286762990999932,
"count": 1,
"self": 5.286762990999932
},
"TrainerController.advance": {
"total": 2640.1571647488536,
"count": 233098,
"self": 4.991912429822605,
"children": {
"env_step": {
"total": 2044.5265322231064,
"count": 233098,
"self": 1722.6456540281843,
"children": {
"SubprocessEnvManager._take_step": {
"total": 318.60366948099704,
"count": 233098,
"self": 17.832372236092397,
"children": {
"TorchPolicy.evaluate": {
"total": 300.77129724490464,
"count": 223037,
"self": 300.77129724490464
}
}
},
"workers": {
"total": 3.27720871392512,
"count": 233098,
"self": 0.0,
"children": {
"worker_root": {
"total": 2642.3072844929566,
"count": 233098,
"is_parallel": true,
"self": 1240.4021526860051,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009562459999870043,
"count": 1,
"is_parallel": true,
"self": 0.0002876370000421957,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006686089999448086,
"count": 2,
"is_parallel": true,
"self": 0.0006686089999448086
}
}
},
"UnityEnvironment.step": {
"total": 0.05294102999994266,
"count": 1,
"is_parallel": true,
"self": 0.000372372999891013,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002549629999748504,
"count": 1,
"is_parallel": true,
"self": 0.0002549629999748504
},
"communicator.exchange": {
"total": 0.05147999200005415,
"count": 1,
"is_parallel": true,
"self": 0.05147999200005415
},
"steps_from_proto": {
"total": 0.0008337020000226403,
"count": 1,
"is_parallel": true,
"self": 0.000263334000010218,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005703680000124223,
"count": 2,
"is_parallel": true,
"self": 0.0005703680000124223
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1401.9051318069514,
"count": 233097,
"is_parallel": true,
"self": 41.387151856047694,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 87.67197050301831,
"count": 233097,
"is_parallel": true,
"self": 87.67197050301831
},
"communicator.exchange": {
"total": 1169.3524227879007,
"count": 233097,
"is_parallel": true,
"self": 1169.3524227879007
},
"steps_from_proto": {
"total": 103.49358665998477,
"count": 233097,
"is_parallel": true,
"self": 39.25497721108434,
"children": {
"_process_rank_one_or_two_observation": {
"total": 64.23860944890043,
"count": 466194,
"is_parallel": true,
"self": 64.23860944890043
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 590.6387200959247,
"count": 233098,
"self": 7.217933865059422,
"children": {
"process_trajectory": {
"total": 159.58540925486466,
"count": 233098,
"self": 157.86416886686413,
"children": {
"RLTrainer._checkpoint": {
"total": 1.721240388000524,
"count": 10,
"self": 1.721240388000524
}
}
},
"_update_policy": {
"total": 423.8353769760006,
"count": 97,
"self": 361.65712315700773,
"children": {
"TorchPPOOptimizer.update": {
"total": 62.17825381899286,
"count": 2910,
"self": 62.17825381899286
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1100000847363845e-06,
"count": 1,
"self": 1.1100000847363845e-06
},
"TrainerController._save_models": {
"total": 0.1413299409996398,
"count": 1,
"self": 0.0024211369996010035,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1389088040000388,
"count": 1,
"self": 0.1389088040000388
}
}
}
}
}
}
}
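
For reference, a minimal sketch of how a log like this might be inspected offline, assuming the JSON above is saved locally as run_logs/timers.json and using only the Python standard library. The walk helper below is illustrative, not part of the ML-Agents API.

import json

# Load the timer/gauge dump produced by an mlagents-learn run.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores the latest value plus its min/max across `count` summary periods.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

def walk(node, name="root", depth=0):
    """Recursively print the hierarchical timer tree (seconds spent per block)."""
    print(f"{'  ' * depth}{name}: total={node.get('total', 0.0):.2f}s "
          f"count={node.get('count', 0)}")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)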