{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4016183614730835,
"min": 1.4016183614730835,
"max": 1.429039478302002,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71618.4921875,
"min": 68416.9375,
"max": 76358.53125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 84.33105802047781,
"min": 77.61006289308176,
"max": 386.04615384615386,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49418.0,
"min": 49303.0,
"max": 50186.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999957.0,
"min": 49834.0,
"max": 1999957.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999957.0,
"min": 49834.0,
"max": 1999957.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.414539098739624,
"min": 0.05287278816103935,
"max": 2.4720962047576904,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1414.919921875,
"min": 6.820589542388916,
"max": 1541.808349609375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7124319603418736,
"min": 1.844498962279438,
"max": 3.9577890626745287,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2175.485128760338,
"min": 237.9403661340475,
"max": 2450.7124643325806,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7124319603418736,
"min": 1.844498962279438,
"max": 3.9577890626745287,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2175.485128760338,
"min": 237.9403661340475,
"max": 2450.7124643325806,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01655630690495147,
"min": 0.013929722543495397,
"max": 0.019668362911518973,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04966892071485442,
"min": 0.027859445086990793,
"max": 0.05606024116956784,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.051025004436572395,
"min": 0.02261689690252145,
"max": 0.07482540868222713,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15307501330971718,
"min": 0.0452337938050429,
"max": 0.21638623798886936,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4934488355500053e-06,
"min": 3.4934488355500053e-06,
"max": 0.000295347526550825,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0480346506650016e-05,
"min": 1.0480346506650016e-05,
"max": 0.000844143018619,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10116445,
"min": 0.10116445,
"max": 0.1984491750000001,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30349335,
"min": 0.2075182,
"max": 0.5813810000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.810605500000011e-05,
"min": 6.810605500000011e-05,
"max": 0.0049226138325000005,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002043181650000003,
"min": 0.0002043181650000003,
"max": 0.014070911900000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678224119",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678226555"
},
"total": 2435.725859467,
"count": 1,
"self": 0.4412880459999542,
"children": {
"run_training.setup": {
"total": 0.11208064200002354,
"count": 1,
"self": 0.11208064200002354
},
"TrainerController.start_learning": {
"total": 2435.1724907790003,
"count": 1,
"self": 4.282834502935657,
"children": {
"TrainerController._reset_env": {
"total": 10.46941585799999,
"count": 1,
"self": 10.46941585799999
},
"TrainerController.advance": {
"total": 2420.299546449065,
"count": 232825,
"self": 4.560012761201506,
"children": {
"env_step": {
"total": 1873.5455306199426,
"count": 232825,
"self": 1563.7748840159636,
"children": {
"SubprocessEnvManager._take_step": {
"total": 306.899889093974,
"count": 232825,
"self": 16.300362326971992,
"children": {
"TorchPolicy.evaluate": {
"total": 290.59952676700203,
"count": 223077,
"self": 73.36514302500404,
"children": {
"TorchPolicy.sample_actions": {
"total": 217.234383741998,
"count": 223077,
"self": 217.234383741998
}
}
}
}
},
"workers": {
"total": 2.870757510004978,
"count": 232825,
"self": 0.0,
"children": {
"worker_root": {
"total": 2426.7499920290134,
"count": 232825,
"is_parallel": true,
"self": 1156.740721588191,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008469260000083523,
"count": 1,
"is_parallel": true,
"self": 0.0002957170000286169,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005512089999797354,
"count": 2,
"is_parallel": true,
"self": 0.0005512089999797354
}
}
},
"UnityEnvironment.step": {
"total": 0.04141029200002322,
"count": 1,
"is_parallel": true,
"self": 0.0002945870000985451,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019582400000217604,
"count": 1,
"is_parallel": true,
"self": 0.00019582400000217604
},
"communicator.exchange": {
"total": 0.04013729599995486,
"count": 1,
"is_parallel": true,
"self": 0.04013729599995486
},
"steps_from_proto": {
"total": 0.0007825849999676393,
"count": 1,
"is_parallel": true,
"self": 0.0002555279999114646,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005270570000561747,
"count": 2,
"is_parallel": true,
"self": 0.0005270570000561747
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1270.0092704408223,
"count": 232824,
"is_parallel": true,
"self": 38.51624715301682,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 78.80901931488273,
"count": 232824,
"is_parallel": true,
"self": 78.80901931488273
},
"communicator.exchange": {
"total": 1060.9074607320335,
"count": 232824,
"is_parallel": true,
"self": 1060.9074607320335
},
"steps_from_proto": {
"total": 91.77654324088905,
"count": 232824,
"is_parallel": true,
"self": 36.872269554863294,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.90427368602576,
"count": 465648,
"is_parallel": true,
"self": 54.90427368602576
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 542.1940030679207,
"count": 232825,
"self": 6.511636836833759,
"children": {
"process_trajectory": {
"total": 171.00697057108675,
"count": 232825,
"self": 169.55342758408722,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4535429869995369,
"count": 10,
"self": 1.4535429869995369
}
}
},
"_update_policy": {
"total": 364.6753956600001,
"count": 97,
"self": 305.04166708500014,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.633728574999964,
"count": 2910,
"self": 59.633728574999964
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.289998731925152e-07,
"count": 1,
"self": 8.289998731925152e-07
},
"TrainerController._save_models": {
"total": 0.12069313999973019,
"count": 1,
"self": 0.0020291199998609954,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1186640199998692,
"count": 1,
"self": 0.1186640199998692
}
}
}
}
}
}
}