{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.40550696849823,
"min": 1.40550696849823,
"max": 1.432058334350586,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70919.0703125,
"min": 68008.4765625,
"max": 77060.078125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 109.41098901098901,
"min": 100.05050505050505,
"max": 440.3333333333333,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49782.0,
"min": 49206.0,
"max": 50198.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999499.0,
"min": 49967.0,
"max": 1999499.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999499.0,
"min": 49967.0,
"max": 1999499.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3004441261291504,
"min": 0.001205666922032833,
"max": 2.385592222213745,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1046.7020263671875,
"min": 0.13624036312103271,
"max": 1163.8505859375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5479598156698455,
"min": 1.7511248287901413,
"max": 3.712595601788077,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1614.3217161297798,
"min": 197.87710565328598,
"max": 1756.4053381681442,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5479598156698455,
"min": 1.7511248287901413,
"max": 3.712595601788077,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1614.3217161297798,
"min": 197.87710565328598,
"max": 1756.4053381681442,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016540085083882633,
"min": 0.01353405811241828,
"max": 0.019683321114765326,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.033080170167765266,
"min": 0.02706811622483656,
"max": 0.05774032569024712,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04855623040348292,
"min": 0.021688893623650077,
"max": 0.062166997128062784,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.09711246080696584,
"min": 0.043377787247300154,
"max": 0.18650099138418835,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.138823620424998e-06,
"min": 4.138823620424998e-06,
"max": 0.00029528482657172506,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.277647240849995e-06,
"min": 8.277647240849995e-06,
"max": 0.0008438013187329001,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10137957499999999,
"min": 0.10137957499999999,
"max": 0.19842827500000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20275914999999997,
"min": 0.20275914999999997,
"max": 0.5812671000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.884079249999997e-05,
"min": 7.884079249999997e-05,
"max": 0.004921570922499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00015768158499999995,
"min": 0.00015768158499999995,
"max": 0.014065228290000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687603944",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687606335"
},
"total": 2390.6643518050005,
"count": 1,
"self": 0.47996394200072245,
"children": {
"run_training.setup": {
"total": 0.04754145299989432,
"count": 1,
"self": 0.04754145299989432
},
"TrainerController.start_learning": {
"total": 2390.1368464099996,
"count": 1,
"self": 5.53127716194831,
"children": {
"TrainerController._reset_env": {
"total": 4.621382338999865,
"count": 1,
"self": 4.621382338999865
},
"TrainerController.advance": {
"total": 2379.8435687230512,
"count": 230854,
"self": 5.3925610562273505,
"children": {
"env_step": {
"total": 1887.680316230914,
"count": 230854,
"self": 1560.9632661369837,
"children": {
"SubprocessEnvManager._take_step": {
"total": 323.23001154805297,
"count": 230854,
"self": 18.974451728974827,
"children": {
"TorchPolicy.evaluate": {
"total": 304.25555981907814,
"count": 223012,
"self": 304.25555981907814
}
}
},
"workers": {
"total": 3.48703854587734,
"count": 230854,
"self": 0.0,
"children": {
"worker_root": {
"total": 2380.6043879351364,
"count": 230854,
"is_parallel": true,
"self": 1124.5495578142202,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008691689999977825,
"count": 1,
"is_parallel": true,
"self": 0.00028384999973241065,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005853190002653719,
"count": 2,
"is_parallel": true,
"self": 0.0005853190002653719
}
}
},
"UnityEnvironment.step": {
"total": 0.027235964999817952,
"count": 1,
"is_parallel": true,
"self": 0.00028618699957405624,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020085599999219994,
"count": 1,
"is_parallel": true,
"self": 0.00020085599999219994
},
"communicator.exchange": {
"total": 0.026239125000074637,
"count": 1,
"is_parallel": true,
"self": 0.026239125000074637
},
"steps_from_proto": {
"total": 0.0005097970001770591,
"count": 1,
"is_parallel": true,
"self": 0.0001372450001326797,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0003725520000443794,
"count": 2,
"is_parallel": true,
"self": 0.0003725520000443794
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1256.0548301209162,
"count": 230853,
"is_parallel": true,
"self": 39.39816116687052,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 67.39082228197617,
"count": 230853,
"is_parallel": true,
"self": 67.39082228197617
},
"communicator.exchange": {
"total": 1058.862520544,
"count": 230853,
"is_parallel": true,
"self": 1058.862520544
},
"steps_from_proto": {
"total": 90.40332612806947,
"count": 230853,
"is_parallel": true,
"self": 30.28845886011709,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.11486726795238,
"count": 461706,
"is_parallel": true,
"self": 60.11486726795238
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 486.77069143590984,
"count": 230854,
"self": 8.549770689905245,
"children": {
"process_trajectory": {
"total": 128.40532814000267,
"count": 230854,
"self": 127.12913503300206,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2761931070006085,
"count": 10,
"self": 1.2761931070006085
}
}
},
"_update_policy": {
"total": 349.8155926060019,
"count": 96,
"self": 300.49482068799284,
"children": {
"TorchPPOOptimizer.update": {
"total": 49.320771918009086,
"count": 2880,
"self": 49.320771918009086
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.100000190490391e-06,
"count": 1,
"self": 1.100000190490391e-06
},
"TrainerController._save_models": {
"total": 0.1406170860000202,
"count": 1,
"self": 0.0020326740000200516,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13858441200000016,
"count": 1,
"self": 0.13858441200000016
}
}
}
}
}
}
}