ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4062851667404175,
"min": 1.4062851667404175,
"max": 1.4267635345458984,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70876.7734375,
"min": 68810.8125,
"max": 77750.390625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 82.09452736318408,
"min": 74.71558245083207,
"max": 404.0403225806452,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49503.0,
"min": 48815.0,
"max": 50268.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999985.0,
"min": 49639.0,
"max": 1999985.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999985.0,
"min": 49639.0,
"max": 1999985.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.535120725631714,
"min": 0.05119509994983673,
"max": 2.535120725631714,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1528.677734375,
"min": 6.2969970703125,
"max": 1598.16796875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9824361308890195,
"min": 1.8783540076356593,
"max": 3.992093539554661,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2401.408986926079,
"min": 231.0375429391861,
"max": 2450.946429133415,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9824361308890195,
"min": 1.8783540076356593,
"max": 3.992093539554661,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2401.408986926079,
"min": 231.0375429391861,
"max": 2450.946429133415,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.0186417122287417,
"min": 0.012241944030392915,
"max": 0.018830988903048112,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.055925136686225105,
"min": 0.02448388806078583,
"max": 0.05649296670914434,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06457546676198642,
"min": 0.02253026943653822,
"max": 0.06504877352466186,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.19372640028595925,
"min": 0.04506053887307644,
"max": 0.19372640028595925,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.3251988916333255e-06,
"min": 3.3251988916333255e-06,
"max": 0.00029534407655197495,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.975596674899976e-06,
"min": 9.975596674899976e-06,
"max": 0.0008440123686625498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10110836666666667,
"min": 0.10110836666666667,
"max": 0.19844802499999997,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3033251,
"min": 0.20734944999999994,
"max": 0.5813374499999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.530749666666654e-05,
"min": 6.530749666666654e-05,
"max": 0.0049225564475,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019592248999999963,
"min": 0.00019592248999999963,
"max": 0.014068738755,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1719501361",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1719503789"
},
"total": 2427.479622027,
"count": 1,
"self": 0.43687208700021074,
"children": {
"run_training.setup": {
"total": 0.06575158099997225,
"count": 1,
"self": 0.06575158099997225
},
"TrainerController.start_learning": {
"total": 2426.976998359,
"count": 1,
"self": 4.5621577700694615,
"children": {
"TrainerController._reset_env": {
"total": 3.252952640999979,
"count": 1,
"self": 3.252952640999979
},
"TrainerController.advance": {
"total": 2419.0396849269305,
"count": 232973,
"self": 4.730027440919002,
"children": {
"env_step": {
"total": 1921.4097175040522,
"count": 232973,
"self": 1588.094527020129,
"children": {
"SubprocessEnvManager._take_step": {
"total": 330.42027980399956,
"count": 232973,
"self": 17.1486115819568,
"children": {
"TorchPolicy.evaluate": {
"total": 313.27166822204276,
"count": 223005,
"self": 313.27166822204276
}
}
},
"workers": {
"total": 2.8949106799237825,
"count": 232973,
"self": 0.0,
"children": {
"worker_root": {
"total": 2419.827667652975,
"count": 232973,
"is_parallel": true,
"self": 1139.3645353120373,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009584389999872656,
"count": 1,
"is_parallel": true,
"self": 0.0002475850000109858,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007108539999762797,
"count": 2,
"is_parallel": true,
"self": 0.0007108539999762797
}
}
},
"UnityEnvironment.step": {
"total": 0.054855083000006744,
"count": 1,
"is_parallel": true,
"self": 0.0004020650000029491,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019396499999402295,
"count": 1,
"is_parallel": true,
"self": 0.00019396499999402295
},
"communicator.exchange": {
"total": 0.053539468999986184,
"count": 1,
"is_parallel": true,
"self": 0.053539468999986184
},
"steps_from_proto": {
"total": 0.0007195840000235876,
"count": 1,
"is_parallel": true,
"self": 0.00017109600003095693,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005484879999926306,
"count": 2,
"is_parallel": true,
"self": 0.0005484879999926306
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1280.4631323409376,
"count": 232972,
"is_parallel": true,
"self": 39.70078395009614,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.48242680193971,
"count": 232972,
"is_parallel": true,
"self": 81.48242680193971
},
"communicator.exchange": {
"total": 1068.4441217799615,
"count": 232972,
"is_parallel": true,
"self": 1068.4441217799615
},
"steps_from_proto": {
"total": 90.8357998089403,
"count": 232972,
"is_parallel": true,
"self": 32.26868928277662,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.567110526163674,
"count": 465944,
"is_parallel": true,
"self": 58.567110526163674
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 492.8999399819592,
"count": 232973,
"self": 6.8194123470208865,
"children": {
"process_trajectory": {
"total": 158.93794283293732,
"count": 232973,
"self": 157.6511153329377,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2868274999996174,
"count": 10,
"self": 1.2868274999996174
}
}
},
"_update_policy": {
"total": 327.142584802001,
"count": 97,
"self": 262.24208728800255,
"children": {
"TorchPPOOptimizer.update": {
"total": 64.90049751399846,
"count": 2910,
"self": 64.90049751399846
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.210002644977067e-07,
"count": 1,
"self": 9.210002644977067e-07
},
"TrainerController._save_models": {
"total": 0.12220209999986764,
"count": 1,
"self": 0.001952413999788405,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12024968600007924,
"count": 1,
"self": 0.12024968600007924
}
}
}
}
}
}
}
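The log above has two parts: "gauges", a flat map of metric summaries (each entry carries value, min, max, count), and a timer tree whose nodes carry total, count, self, and an optional children map. As a minimal sketch only, the snippet below loads a file laid out like this one with the standard json module and prints both parts; the relative file path is an assumption, not part of the run log.

import json

# A minimal sketch: read a timers.json shaped like the log above.
# The path below is an assumption about where the file lives.
with open("ppo-Huggy/run_logs/timers.json") as f:
    timers = json.load(f)

# "gauges" is a flat map: each metric records value, min, max, and count.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# Timers form a tree: each node has total, count, self, and optional children.
def walk(name, node, depth=0):
    print(f"{'  ' * depth}{name}: total={node['total']:.2f}s, count={node['count']}")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1)

walk(timers.get("name", "root"), timers)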