Unit1-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4041694402694702,
"min": 1.4041694402694702,
"max": 1.425337314605713,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70264.640625,
"min": 67757.09375,
"max": 76777.25,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 111.80135440180587,
"min": 92.12218045112782,
"max": 364.985401459854,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49528.0,
"min": 48805.0,
"max": 50079.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999795.0,
"min": 49774.0,
"max": 1999795.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999795.0,
"min": 49774.0,
"max": 1999795.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.343100070953369,
"min": 0.08046874403953552,
"max": 2.41450834274292,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1037.9932861328125,
"min": 10.94374942779541,
"max": 1272.6444091796875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6492546173842713,
"min": 1.7055706180193846,
"max": 3.905471442764952,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1616.6197955012321,
"min": 231.9576040506363,
"max": 2016.7303419709206,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6492546173842713,
"min": 1.7055706180193846,
"max": 3.905471442764952,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1616.6197955012321,
"min": 231.9576040506363,
"max": 2016.7303419709206,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015851576804965992,
"min": 0.01383512289612554,
"max": 0.019979001509879402,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04755473041489798,
"min": 0.02771488609335696,
"max": 0.05993700452963821,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.048364643131693204,
"min": 0.022282409564488462,
"max": 0.05656962055299017,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14509392939507962,
"min": 0.046123291738331317,
"max": 0.1697088616589705,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.3851488716500016e-06,
"min": 3.3851488716500016e-06,
"max": 0.00029535870154709994,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0155446614950004e-05,
"min": 1.0155446614950004e-05,
"max": 0.0008443917185360999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10112835000000002,
"min": 0.10112835000000002,
"max": 0.1984529,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30338505000000004,
"min": 0.20745399999999997,
"max": 0.5814639000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.630466500000002e-05,
"min": 6.630466500000002e-05,
"max": 0.00492279971,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019891399500000006,
"min": 0.00019891399500000006,
"max": 0.014075048610000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1704988361",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn --force ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1704990852"
},
"total": 2491.564166077,
"count": 1,
"self": 0.6991997050004102,
"children": {
"run_training.setup": {
"total": 0.04766274300004625,
"count": 1,
"self": 0.04766274300004625
},
"TrainerController.start_learning": {
"total": 2490.817303629,
"count": 1,
"self": 4.450312191974717,
"children": {
"TrainerController._reset_env": {
"total": 3.3024571180001203,
"count": 1,
"self": 3.3024571180001203
},
"TrainerController.advance": {
"total": 2482.9022821210256,
"count": 231253,
"self": 4.7796277309348625,
"children": {
"env_step": {
"total": 1967.6777370330371,
"count": 231253,
"self": 1638.5189408912265,
"children": {
"SubprocessEnvManager._take_step": {
"total": 326.26396600081534,
"count": 231253,
"self": 17.41607904779289,
"children": {
"TorchPolicy.evaluate": {
"total": 308.84788695302245,
"count": 222995,
"self": 308.84788695302245
}
}
},
"workers": {
"total": 2.894830140995282,
"count": 231253,
"self": 0.0,
"children": {
"worker_root": {
"total": 2483.24652311594,
"count": 231253,
"is_parallel": true,
"self": 1148.6235339989735,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0007014790000994253,
"count": 1,
"is_parallel": true,
"self": 0.0001916350001920364,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005098439999073889,
"count": 2,
"is_parallel": true,
"self": 0.0005098439999073889
}
}
},
"UnityEnvironment.step": {
"total": 0.0295876140000928,
"count": 1,
"is_parallel": true,
"self": 0.0003171010002915864,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019022599985873967,
"count": 1,
"is_parallel": true,
"self": 0.00019022599985873967
},
"communicator.exchange": {
"total": 0.02839399999993475,
"count": 1,
"is_parallel": true,
"self": 0.02839399999993475
},
"steps_from_proto": {
"total": 0.0006862870000077237,
"count": 1,
"is_parallel": true,
"self": 0.0001846580000801623,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005016289999275614,
"count": 2,
"is_parallel": true,
"self": 0.0005016289999275614
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1334.6229891169664,
"count": 231252,
"is_parallel": true,
"self": 41.20219448096577,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.35887513394846,
"count": 231252,
"is_parallel": true,
"self": 83.35887513394846
},
"communicator.exchange": {
"total": 1118.6961078990403,
"count": 231252,
"is_parallel": true,
"self": 1118.6961078990403
},
"steps_from_proto": {
"total": 91.36581160301193,
"count": 231252,
"is_parallel": true,
"self": 32.128034641142904,
"children": {
"_process_rank_one_or_two_observation": {
"total": 59.23777696186903,
"count": 462504,
"is_parallel": true,
"self": 59.23777696186903
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 510.44491735705355,
"count": 231253,
"self": 7.253422288968295,
"children": {
"process_trajectory": {
"total": 150.6522222600861,
"count": 231253,
"self": 149.41468079208562,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2375414680004724,
"count": 10,
"self": 1.2375414680004724
}
}
},
"_update_policy": {
"total": 352.53927280799917,
"count": 97,
"self": 286.5636865789925,
"children": {
"TorchPPOOptimizer.update": {
"total": 65.9755862290067,
"count": 2910,
"self": 65.9755862290067
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2309997146076057e-06,
"count": 1,
"self": 1.2309997146076057e-06
},
"TrainerController._save_models": {
"total": 0.16225096699963615,
"count": 1,
"self": 0.0036298669997449906,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15862109999989116,
"count": 1,
"self": 0.15862109999989116
}
}
}
}
}
}
}
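
For reference, a minimal sketch of how this timers file can be inspected programmatically with Python's standard library. The local path "run_logs/timers.json" is an assumption about where the file is saved; the "gauges" and "metadata" keys come from the file above.

import json

# Load the ML-Agents timers.json shown above.
# The path is an assumption; adjust it to where the file actually lives.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records the latest value plus the min/max/count observed over the run.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# The metadata block records the training command and library versions used for this run.
print(timers["metadata"]["command_line_arguments"])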