{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4083560705184937,
"min": 1.4083560705184937,
"max": 1.4304262399673462,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69974.171875,
"min": 67264.5625,
"max": 77501.7578125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 110.06681514476615,
"min": 91.04096834264432,
"max": 409.2520325203252,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49420.0,
"min": 48864.0,
"max": 50338.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999909.0,
"min": 49885.0,
"max": 1999909.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999909.0,
"min": 49885.0,
"max": 1999909.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.336402177810669,
"min": 0.010859047994017601,
"max": 2.4235692024230957,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1049.0445556640625,
"min": 1.3248038291931152,
"max": 1291.9776611328125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.519974739620574,
"min": 1.6862260840955328,
"max": 3.8304605906934532,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1580.4686580896378,
"min": 205.719582259655,
"max": 2002.9133558571339,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.519974739620574,
"min": 1.6862260840955328,
"max": 3.8304605906934532,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1580.4686580896378,
"min": 205.719582259655,
"max": 2002.9133558571339,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017674006724135122,
"min": 0.012898803724844279,
"max": 0.021337292034877463,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05302202017240537,
"min": 0.025797607449688557,
"max": 0.06205983057540531,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04916503375603093,
"min": 0.02210560841485858,
"max": 0.07417824847830666,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1474951012680928,
"min": 0.04421121682971716,
"max": 0.22253474543492,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.1721989426333325e-06,
"min": 3.1721989426333325e-06,
"max": 0.00029535442654852497,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.516596827899998e-06,
"min": 9.516596827899998e-06,
"max": 0.0008441836686054497,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10105736666666665,
"min": 0.10105736666666665,
"max": 0.19845147500000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30317209999999994,
"min": 0.20724129999999996,
"max": 0.5813945500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.276259666666667e-05,
"min": 6.276259666666667e-05,
"max": 0.004922728602499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018828779,
"min": 0.00018828779,
"max": 0.014071588045000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1740041841",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1740044303"
},
"total": 2462.364942156,
"count": 1,
"self": 0.437250332000076,
"children": {
"run_training.setup": {
"total": 0.0226165380000225,
"count": 1,
"self": 0.0226165380000225
},
"TrainerController.start_learning": {
"total": 2461.905075286,
"count": 1,
"self": 4.349644490037463,
"children": {
"TrainerController._reset_env": {
"total": 3.095244182999977,
"count": 1,
"self": 3.095244182999977
},
"TrainerController.advance": {
"total": 2454.3314498149625,
"count": 231031,
"self": 4.505896737016428,
"children": {
"env_step": {
"total": 1980.8417426669516,
"count": 231031,
"self": 1546.5300932768068,
"children": {
"SubprocessEnvManager._take_step": {
"total": 431.62646183009736,
"count": 231031,
"self": 15.600386135108636,
"children": {
"TorchPolicy.evaluate": {
"total": 416.0260756949887,
"count": 223054,
"self": 416.0260756949887
}
}
},
"workers": {
"total": 2.6851875600474955,
"count": 231031,
"self": 0.0,
"children": {
"worker_root": {
"total": 2454.2459951669493,
"count": 231031,
"is_parallel": true,
"self": 1191.6664728429535,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0011242090000109783,
"count": 1,
"is_parallel": true,
"self": 0.0004720530000099643,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000652156000001014,
"count": 2,
"is_parallel": true,
"self": 0.000652156000001014
}
}
},
"UnityEnvironment.step": {
"total": 0.029257400000005873,
"count": 1,
"is_parallel": true,
"self": 0.0003267519999781143,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019281499999124208,
"count": 1,
"is_parallel": true,
"self": 0.00019281499999124208
},
"communicator.exchange": {
"total": 0.028001934000030815,
"count": 1,
"is_parallel": true,
"self": 0.028001934000030815
},
"steps_from_proto": {
"total": 0.000735899000005702,
"count": 1,
"is_parallel": true,
"self": 0.0001977189999706752,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005381800000350268,
"count": 2,
"is_parallel": true,
"self": 0.0005381800000350268
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1262.579522323996,
"count": 231030,
"is_parallel": true,
"self": 37.65821425190393,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.70746142707685,
"count": 231030,
"is_parallel": true,
"self": 82.70746142707685
},
"communicator.exchange": {
"total": 1053.7418607969987,
"count": 231030,
"is_parallel": true,
"self": 1053.7418607969987
},
"steps_from_proto": {
"total": 88.47198584801646,
"count": 231030,
"is_parallel": true,
"self": 33.11497553488846,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.357010313128,
"count": 462060,
"is_parallel": true,
"self": 55.357010313128
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 468.98381041099424,
"count": 231031,
"self": 6.82920118496537,
"children": {
"process_trajectory": {
"total": 159.0835814200285,
"count": 231031,
"self": 157.71353782502808,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3700435950004248,
"count": 10,
"self": 1.3700435950004248
}
}
},
"_update_policy": {
"total": 303.07102780600036,
"count": 97,
"self": 238.82953429699938,
"children": {
"TorchPPOOptimizer.update": {
"total": 64.24149350900097,
"count": 2910,
"self": 64.24149350900097
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.199000053020427e-06,
"count": 1,
"self": 1.199000053020427e-06
},
"TrainerController._save_models": {
"total": 0.12873559899981046,
"count": 1,
"self": 0.0019624499996098166,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12677314900020065,
"count": 1,
"self": 0.12677314900020065
}
}
}
}
}
}
}