{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4049715995788574,
"min": 1.4049715995788574,
"max": 1.4286478757858276,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 72660.9140625,
"min": 68255.8984375,
"max": 75072.1484375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 88.63195691202873,
"min": 72.94526627218934,
"max": 387.2015503875969,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49368.0,
"min": 48697.0,
"max": 50179.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999993.0,
"min": 49772.0,
"max": 1999993.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999993.0,
"min": 49772.0,
"max": 1999993.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.478121519088745,
"min": -0.010616051033139229,
"max": 2.557995080947876,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1380.313720703125,
"min": -1.3588545322418213,
"max": 1636.3646240234375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8048854830877143,
"min": 1.774566610576585,
"max": 4.022371498556925,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2119.321214079857,
"min": 227.14452615380287,
"max": 2556.765806555748,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8048854830877143,
"min": 1.774566610576585,
"max": 4.022371498556925,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2119.321214079857,
"min": 227.14452615380287,
"max": 2556.765806555748,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01748927694732427,
"min": 0.013036049730079767,
"max": 0.01980047491233563,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.052467830841972804,
"min": 0.026713657641084868,
"max": 0.059331685896419614,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05196912905408276,
"min": 0.024153807386755943,
"max": 0.06573047817995151,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15590738716224828,
"min": 0.048307614773511885,
"max": 0.19047911750773588,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5992988002666687e-06,
"min": 3.5992988002666687e-06,
"max": 0.00029528632657122494,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0797896400800007e-05,
"min": 1.0797896400800007e-05,
"max": 0.0008434657688447501,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10119973333333336,
"min": 0.10119973333333336,
"max": 0.198428775,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30359920000000007,
"min": 0.20760645000000003,
"max": 0.58115525,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.986669333333338e-05,
"min": 6.986669333333338e-05,
"max": 0.004921595872499998,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020960008000000017,
"min": 0.00020960008000000017,
"max": 0.014059646975,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678043073",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678045501"
},
"total": 2427.76280871,
"count": 1,
"self": 0.45374622700001055,
"children": {
"run_training.setup": {
"total": 0.12194257200002312,
"count": 1,
"self": 0.12194257200002312
},
"TrainerController.start_learning": {
"total": 2427.187119911,
"count": 1,
"self": 4.302803589065661,
"children": {
"TrainerController._reset_env": {
"total": 10.548423255000046,
"count": 1,
"self": 10.548423255000046
},
"TrainerController.advance": {
"total": 2412.227902591934,
"count": 232943,
"self": 4.700148339904899,
"children": {
"env_step": {
"total": 1882.0974023650158,
"count": 232943,
"self": 1571.1836537830507,
"children": {
"SubprocessEnvManager._take_step": {
"total": 308.0378810770137,
"count": 232943,
"self": 16.511509878982963,
"children": {
"TorchPolicy.evaluate": {
"total": 291.52637119803074,
"count": 223134,
"self": 73.5617388921674,
"children": {
"TorchPolicy.sample_actions": {
"total": 217.96463230586335,
"count": 223134,
"self": 217.96463230586335
}
}
}
}
},
"workers": {
"total": 2.8758675049514295,
"count": 232943,
"self": 0.0,
"children": {
"worker_root": {
"total": 2418.54803109905,
"count": 232943,
"is_parallel": true,
"self": 1144.4727800370829,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010128580000241527,
"count": 1,
"is_parallel": true,
"self": 0.00042955999998639527,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005832980000377574,
"count": 2,
"is_parallel": true,
"self": 0.0005832980000377574
}
}
},
"UnityEnvironment.step": {
"total": 0.0517643090000206,
"count": 1,
"is_parallel": true,
"self": 0.000347232000024178,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002129730000319796,
"count": 1,
"is_parallel": true,
"self": 0.0002129730000319796
},
"communicator.exchange": {
"total": 0.048998489999974026,
"count": 1,
"is_parallel": true,
"self": 0.048998489999974026
},
"steps_from_proto": {
"total": 0.002205613999990419,
"count": 1,
"is_parallel": true,
"self": 0.0002860730000406875,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019195409999497315,
"count": 2,
"is_parallel": true,
"self": 0.0019195409999497315
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1274.0752510619673,
"count": 232942,
"is_parallel": true,
"self": 38.535441499949,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 84.45659717097618,
"count": 232942,
"is_parallel": true,
"self": 84.45659717097618
},
"communicator.exchange": {
"total": 1058.4132093310934,
"count": 232942,
"is_parallel": true,
"self": 1058.4132093310934
},
"steps_from_proto": {
"total": 92.67000305994867,
"count": 232942,
"is_parallel": true,
"self": 39.79842431793088,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.87157874201779,
"count": 465884,
"is_parallel": true,
"self": 52.87157874201779
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 525.4303518870131,
"count": 232943,
"self": 6.422734288100401,
"children": {
"process_trajectory": {
"total": 171.50760278791063,
"count": 232943,
"self": 170.11657847991046,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3910243080001692,
"count": 10,
"self": 1.3910243080001692
}
}
},
"_update_policy": {
"total": 347.50001481100213,
"count": 97,
"self": 290.9336414669999,
"children": {
"TorchPPOOptimizer.update": {
"total": 56.566373344002216,
"count": 2910,
"self": 56.566373344002216
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.64000264502829e-07,
"count": 1,
"self": 9.64000264502829e-07
},
"TrainerController._save_models": {
"total": 0.10798951100014165,
"count": 1,
"self": 0.0019533139998202387,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10603619700032141,
"count": 1,
"self": 0.10603619700032141
}
}
}
}
}
}
}