{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4018433094024658,
"min": 1.4018433094024658,
"max": 1.4277427196502686,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69115.078125,
"min": 68636.7421875,
"max": 75893.6796875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 82.17171717171718,
"min": 78.35955056179775,
"max": 403.03225806451616,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48810.0,
"min": 48810.0,
"max": 49978.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999944.0,
"min": 49684.0,
"max": 1999944.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999944.0,
"min": 49684.0,
"max": 1999944.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.453338623046875,
"min": -0.04823176562786102,
"max": 2.4573044776916504,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1457.283203125,
"min": -5.932507038116455,
"max": 1505.253662109375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.829572378083913,
"min": 1.5369466848489715,
"max": 3.9741147357718387,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2274.7659925818443,
"min": 189.0444422364235,
"max": 2389.2634751796722,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.829572378083913,
"min": 1.5369466848489715,
"max": 3.9741147357718387,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2274.7659925818443,
"min": 189.0444422364235,
"max": 2389.2634751796722,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015164625430608996,
"min": 0.014341046778038921,
"max": 0.021561302426319647,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04549387629182699,
"min": 0.028682093556077842,
"max": 0.06253359501133673,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05338211419681708,
"min": 0.022740358517815668,
"max": 0.06378283469627301,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16014634259045124,
"min": 0.045480717035631335,
"max": 0.1907971365998189,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6101487966499986e-06,
"min": 3.6101487966499986e-06,
"max": 0.0002953293015569,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0830446389949996e-05,
"min": 1.0830446389949996e-05,
"max": 0.0008439970686676499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10120335000000001,
"min": 0.10120335000000001,
"max": 0.1984431,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30361005,
"min": 0.20753405000000003,
"max": 0.58133235,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.004716499999997e-05,
"min": 7.004716499999997e-05,
"max": 0.004922310690000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002101414949999999,
"min": 0.0002101414949999999,
"max": 0.014068484264999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679652681",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/home/ummagumma/personal/huggingface_rl/env/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679654814"
},
"total": 2132.8677174355835,
"count": 1,
"self": 0.42084123380482197,
"children": {
"run_training.setup": {
"total": 0.018077922984957695,
"count": 1,
"self": 0.018077922984957695
},
"TrainerController.start_learning": {
"total": 2132.4287982787937,
"count": 1,
"self": 2.822715474292636,
"children": {
"TrainerController._reset_env": {
"total": 3.0702766459435225,
"count": 1,
"self": 3.0702766459435225
},
"TrainerController.advance": {
"total": 2126.4159332159907,
"count": 232420,
"self": 3.013344781473279,
"children": {
"env_step": {
"total": 1807.297152152285,
"count": 232420,
"self": 1621.012469097972,
"children": {
"SubprocessEnvManager._take_step": {
"total": 184.37693233042955,
"count": 232420,
"self": 10.862324165180326,
"children": {
"TorchPolicy.evaluate": {
"total": 173.51460816524923,
"count": 222903,
"self": 173.51460816524923
}
}
},
"workers": {
"total": 1.9077507238835096,
"count": 232420,
"self": 0.0,
"children": {
"worker_root": {
"total": 2124.0008942857385,
"count": 232420,
"is_parallel": true,
"self": 751.9864830896258,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001384492963552475,
"count": 1,
"is_parallel": true,
"self": 0.00041896291077136993,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000965530052781105,
"count": 2,
"is_parallel": true,
"self": 0.000965530052781105
}
}
},
"UnityEnvironment.step": {
"total": 0.032528094947338104,
"count": 1,
"is_parallel": true,
"self": 0.0004730653017759323,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002926420420408249,
"count": 1,
"is_parallel": true,
"self": 0.0002926420420408249
},
"communicator.exchange": {
"total": 0.03077707439661026,
"count": 1,
"is_parallel": true,
"self": 0.03077707439661026
},
"steps_from_proto": {
"total": 0.000985313206911087,
"count": 1,
"is_parallel": true,
"self": 0.0002942606806755066,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006910525262355804,
"count": 2,
"is_parallel": true,
"self": 0.0006910525262355804
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1372.0144111961126,
"count": 232419,
"is_parallel": true,
"self": 41.58648641221225,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.28991876915097,
"count": 232419,
"is_parallel": true,
"self": 83.28991876915097
},
"communicator.exchange": {
"total": 1152.861523438245,
"count": 232419,
"is_parallel": true,
"self": 1152.861523438245
},
"steps_from_proto": {
"total": 94.27648257650435,
"count": 232419,
"is_parallel": true,
"self": 36.11769083701074,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.15879173949361,
"count": 464838,
"is_parallel": true,
"self": 58.15879173949361
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 316.1054362822324,
"count": 232420,
"self": 3.970396926626563,
"children": {
"process_trajectory": {
"total": 81.05346230231225,
"count": 232420,
"self": 79.86158644594252,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1918758563697338,
"count": 10,
"self": 1.1918758563697338
}
}
},
"_update_policy": {
"total": 231.08157705329359,
"count": 97,
"self": 189.95173043571413,
"children": {
"TorchPPOOptimizer.update": {
"total": 41.12984661757946,
"count": 2910,
"self": 41.12984661757946
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.791685104370117e-07,
"count": 1,
"self": 8.791685104370117e-07
},
"TrainerController._save_models": {
"total": 0.1198720633983612,
"count": 1,
"self": 0.001544758677482605,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1183273047208786,
"count": 1,
"self": 0.1183273047208786
}
}
}
}
}
}
}