ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4047698974609375,
"min": 1.4047698974609375,
"max": 1.4239633083343506,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70256.7578125,
"min": 68634.5625,
"max": 79067.7734375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 69.51763046544428,
"min": 66.87771739130434,
"max": 382.09923664122135,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49288.0,
"min": 49150.0,
"max": 50055.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999854.0,
"min": 49658.0,
"max": 1999854.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999854.0,
"min": 49658.0,
"max": 1999854.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.542379379272461,
"min": -0.10195692628622055,
"max": 2.5425751209259033,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1802.5469970703125,
"min": -13.254400253295898,
"max": 1863.70751953125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 4.074287688866657,
"min": 1.8461432173848151,
"max": 4.074287688866657,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2888.66997140646,
"min": 239.99861826002598,
"max": 2954.5003829598427,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 4.074287688866657,
"min": 1.8461432173848151,
"max": 4.074287688866657,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2888.66997140646,
"min": 239.99861826002598,
"max": 2954.5003829598427,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01594921103478555,
"min": 0.013498708477062691,
"max": 0.019977029754469793,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04784763310435665,
"min": 0.028048776986543088,
"max": 0.058334406723345944,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.056770237162709236,
"min": 0.025725596491247414,
"max": 0.06595451217144728,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1703107114881277,
"min": 0.05145119298249483,
"max": 0.1724211280544599,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.998748667116671e-06,
"min": 3.998748667116671e-06,
"max": 0.00029528325157224993,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1996246001350013e-05,
"min": 1.1996246001350013e-05,
"max": 0.0008439033186989002,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10133288333333336,
"min": 0.10133288333333336,
"max": 0.19842775,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3039986500000001,
"min": 0.20779494999999998,
"max": 0.5813011,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.651087833333339e-05,
"min": 7.651087833333339e-05,
"max": 0.004921544725,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022953263500000018,
"min": 0.00022953263500000018,
"max": 0.014066924890000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680172872",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680175152"
},
"total": 2279.331749431,
"count": 1,
"self": 0.42629323800019847,
"children": {
"run_training.setup": {
"total": 0.11171033499977057,
"count": 1,
"self": 0.11171033499977057
},
"TrainerController.start_learning": {
"total": 2278.793745858,
"count": 1,
"self": 4.115571731847012,
"children": {
"TrainerController._reset_env": {
"total": 9.679640471999846,
"count": 1,
"self": 9.679640471999846
},
"TrainerController.advance": {
"total": 2264.8818736211533,
"count": 233768,
"self": 4.435717644996203,
"children": {
"env_step": {
"total": 1764.9196439521738,
"count": 233768,
"self": 1490.1680587687006,
"children": {
"SubprocessEnvManager._take_step": {
"total": 272.0654882232816,
"count": 233768,
"self": 16.36639671528428,
"children": {
"TorchPolicy.evaluate": {
"total": 255.6990915079973,
"count": 222917,
"self": 255.6990915079973
}
}
},
"workers": {
"total": 2.686096960191662,
"count": 233768,
"self": 0.0,
"children": {
"worker_root": {
"total": 2271.06421355986,
"count": 233768,
"is_parallel": true,
"self": 1054.4897893448888,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0007895260000623239,
"count": 1,
"is_parallel": true,
"self": 0.00021261400024741306,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005769119998149108,
"count": 2,
"is_parallel": true,
"self": 0.0005769119998149108
}
}
},
"UnityEnvironment.step": {
"total": 0.02954792799982897,
"count": 1,
"is_parallel": true,
"self": 0.000365017000149237,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019471099994916585,
"count": 1,
"is_parallel": true,
"self": 0.00019471099994916585
},
"communicator.exchange": {
"total": 0.028289139999742474,
"count": 1,
"is_parallel": true,
"self": 0.028289139999742474
},
"steps_from_proto": {
"total": 0.0006990599999880942,
"count": 1,
"is_parallel": true,
"self": 0.00020687100004579406,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004921889999423001,
"count": 2,
"is_parallel": true,
"self": 0.0004921889999423001
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1216.5744242149713,
"count": 233767,
"is_parallel": true,
"self": 37.53528575452492,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.09799459779833,
"count": 233767,
"is_parallel": true,
"self": 76.09799459779833
},
"communicator.exchange": {
"total": 1014.8621175118615,
"count": 233767,
"is_parallel": true,
"self": 1014.8621175118615
},
"steps_from_proto": {
"total": 88.07902635078653,
"count": 233767,
"is_parallel": true,
"self": 33.14308436971987,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.93594198106666,
"count": 467534,
"is_parallel": true,
"self": 54.93594198106666
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 495.5265120239833,
"count": 233768,
"self": 6.377986877175317,
"children": {
"process_trajectory": {
"total": 144.0972302688083,
"count": 233768,
"self": 142.68516894680806,
"children": {
"RLTrainer._checkpoint": {
"total": 1.412061322000227,
"count": 10,
"self": 1.412061322000227
}
}
},
"_update_policy": {
"total": 345.0512948779997,
"count": 97,
"self": 288.01382605798517,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.03746882001451,
"count": 2910,
"self": 57.03746882001451
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.180002962239087e-07,
"count": 1,
"self": 9.180002962239087e-07
},
"TrainerController._save_models": {
"total": 0.11665911499949289,
"count": 1,
"self": 0.0020325919995229924,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1146265229999699,
"count": 1,
"self": 0.1146265229999699
}
}
}
}
}
}
}