ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.403747797012329,
"min": 1.403747797012329,
"max": 1.428682565689087,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68377.9609375,
"min": 68377.9609375,
"max": 77491.46875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 79.59032258064516,
"min": 69.0013986013986,
"max": 400.568,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49346.0,
"min": 49178.0,
"max": 50071.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999915.0,
"min": 49849.0,
"max": 1999915.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999915.0,
"min": 49849.0,
"max": 1999915.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.452906847000122,
"min": 0.09449683874845505,
"max": 2.56415057182312,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1520.80224609375,
"min": 11.717608451843262,
"max": 1761.170166015625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7561427861452104,
"min": 1.7071929501910363,
"max": 4.055413291076761,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2328.8085274100304,
"min": 211.6919258236885,
"max": 2770.19799888134,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7561427861452104,
"min": 1.7071929501910363,
"max": 4.055413291076761,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2328.8085274100304,
"min": 211.6919258236885,
"max": 2770.19799888134,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01355712976065762,
"min": 0.01355712976065762,
"max": 0.0191361604977121,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04067138928197286,
"min": 0.02860584242832071,
"max": 0.0574084814931363,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05360094259182612,
"min": 0.021782644900182883,
"max": 0.060680550833543144,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16080282777547836,
"min": 0.04356528980036577,
"max": 0.17387855648994444,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.779348740250013e-06,
"min": 3.779348740250013e-06,
"max": 0.00029533867655377494,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1338046220750038e-05,
"min": 1.1338046220750038e-05,
"max": 0.00084427396857535,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10125975,
"min": 0.10125975,
"max": 0.198446225,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30377925,
"min": 0.20765480000000006,
"max": 0.58142465,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.286152500000021e-05,
"min": 7.286152500000021e-05,
"max": 0.004922466627500002,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021858457500000062,
"min": 0.00021858457500000062,
"max": 0.014073090035000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1740562870",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1740565358"
},
"total": 2487.69487519,
"count": 1,
"self": 0.4374910199999249,
"children": {
"run_training.setup": {
"total": 0.022877945999994154,
"count": 1,
"self": 0.022877945999994154
},
"TrainerController.start_learning": {
"total": 2487.234506224,
"count": 1,
"self": 4.522452727104792,
"children": {
"TrainerController._reset_env": {
"total": 2.876274370000033,
"count": 1,
"self": 2.876274370000033
},
"TrainerController.advance": {
"total": 2479.7170335658952,
"count": 233458,
"self": 4.766873328882866,
"children": {
"env_step": {
"total": 1984.4477967150444,
"count": 233458,
"self": 1554.5380759751806,
"children": {
"SubprocessEnvManager._take_step": {
"total": 427.209927297946,
"count": 233458,
"self": 15.798232839901402,
"children": {
"TorchPolicy.evaluate": {
"total": 411.4116944580446,
"count": 222897,
"self": 411.4116944580446
}
}
},
"workers": {
"total": 2.699793441917791,
"count": 233458,
"self": 0.0,
"children": {
"worker_root": {
"total": 2479.8807509949,
"count": 233458,
"is_parallel": true,
"self": 1213.4109909558542,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001148931999978231,
"count": 1,
"is_parallel": true,
"self": 0.0003865720001385853,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007623599998396458,
"count": 2,
"is_parallel": true,
"self": 0.0007623599998396458
}
}
},
"UnityEnvironment.step": {
"total": 0.02974997599994822,
"count": 1,
"is_parallel": true,
"self": 0.0003025919999117832,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018563399999038666,
"count": 1,
"is_parallel": true,
"self": 0.00018563399999038666
},
"communicator.exchange": {
"total": 0.02857234200007497,
"count": 1,
"is_parallel": true,
"self": 0.02857234200007497
},
"steps_from_proto": {
"total": 0.0006894079999710812,
"count": 1,
"is_parallel": true,
"self": 0.0002168109999729495,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00047259699999813165,
"count": 2,
"is_parallel": true,
"self": 0.00047259699999813165
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1266.469760039046,
"count": 233457,
"is_parallel": true,
"self": 37.70052698987911,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.35184171707226,
"count": 233457,
"is_parallel": true,
"self": 83.35184171707226
},
"communicator.exchange": {
"total": 1056.2987670819734,
"count": 233457,
"is_parallel": true,
"self": 1056.2987670819734
},
"steps_from_proto": {
"total": 89.11862425012123,
"count": 233457,
"is_parallel": true,
"self": 33.490061004268455,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.628563245852774,
"count": 466914,
"is_parallel": true,
"self": 55.628563245852774
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 490.50236352196805,
"count": 233458,
"self": 6.6977445829655835,
"children": {
"process_trajectory": {
"total": 172.8328825690022,
"count": 233458,
"self": 171.45929092400308,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3735916449991237,
"count": 10,
"self": 1.3735916449991237
}
}
},
"_update_policy": {
"total": 310.97173637000026,
"count": 97,
"self": 247.1928761759957,
"children": {
"TorchPPOOptimizer.update": {
"total": 63.77886019400455,
"count": 2910,
"self": 63.77886019400455
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.6880003386177123e-06,
"count": 1,
"self": 1.6880003386177123e-06
},
"TrainerController._save_models": {
"total": 0.11874387299985756,
"count": 1,
"self": 0.0018781080002554518,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1168657649996021,
"count": 1,
"self": 0.1168657649996021
}
}
}
}
}
}
}
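
The JSON above is the gauge/timer dump that ML-Agents writes into `run_logs/timers.json` at the end of a run: `gauges` holds per-metric value/min/max/count summaries, and the rest is a tree of timer nodes, each with `total`, `count`, `self`, and optional `children`. As a minimal sketch of how one might inspect it (the local path is an assumption, and `walk` is an illustrative helper, not part of ML-Agents), in Python:

```python
import json

# Assumed local path; adjust to wherever run_logs/timers.json was downloaded.
with open("run_logs/timers.json") as f:
    root = json.load(f)

# Each gauge stores value/min/max/count; e.g. the cumulative-reward statistics.
reward = root["gauges"]["Huggy.Environment.CumulativeReward.mean"]
print(f"mean reward: {reward['value']:.3f} "
      f"(min {reward['min']:.3f}, max {reward['max']:.3f})")

# Timer nodes form a tree: print each node's total time and call count.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.1f}s "
          f"over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(root)
```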