ppo-Huggy / run_logs / timers.json (commit 238e343)
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.402669906616211,
"min": 1.402669906616211,
"max": 1.4265022277832031,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69443.3828125,
"min": 67718.7265625,
"max": 79473.0,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 103.66525423728814,
"min": 87.17491166077738,
"max": 392.9685039370079,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48930.0,
"min": 48855.0,
"max": 50078.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999980.0,
"min": 49368.0,
"max": 1999980.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999980.0,
"min": 49368.0,
"max": 1999980.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3273210525512695,
"min": 0.08333880454301834,
"max": 2.4460620880126953,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1098.4954833984375,
"min": 10.500689506530762,
"max": 1350.4368896484375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.545127571765649,
"min": 1.8064893009880232,
"max": 3.9783428779916266,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1673.3002138733864,
"min": 227.61765192449093,
"max": 2165.2409418821335,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.545127571765649,
"min": 1.8064893009880232,
"max": 3.9783428779916266,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1673.3002138733864,
"min": 227.61765192449093,
"max": 2165.2409418821335,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014592017988777822,
"min": 0.012267607592366402,
"max": 0.019799958011976235,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04377605396633347,
"min": 0.024535215184732805,
"max": 0.05939987403592871,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.046377381061514227,
"min": 0.02069087317213416,
"max": 0.05797531430919965,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.13913214318454267,
"min": 0.04138174634426832,
"max": 0.1714813775072495,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6152987949333295e-06,
"min": 3.6152987949333295e-06,
"max": 0.000295359976546675,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0845896384799988e-05,
"min": 1.0845896384799988e-05,
"max": 0.0008441116686294499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10120506666666666,
"min": 0.10120506666666666,
"max": 0.198453325,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3036152,
"min": 0.20754425000000004,
"max": 0.58137055,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.01328266666666e-05,
"min": 7.01328266666666e-05,
"max": 0.004922820917500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021039847999999978,
"min": 0.00021039847999999978,
"max": 0.014070390445,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678270765",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678273350"
},
"total": 2584.658673856,
"count": 1,
"self": 0.44479924799998116,
"children": {
"run_training.setup": {
"total": 0.11207445800005189,
"count": 1,
"self": 0.11207445800005189
},
"TrainerController.start_learning": {
"total": 2584.1018001499997,
"count": 1,
"self": 4.6185044011085665,
"children": {
"TrainerController._reset_env": {
"total": 9.443798042000026,
"count": 1,
"self": 9.443798042000026
},
"TrainerController.advance": {
"total": 2569.924823888891,
"count": 232167,
"self": 5.143489133814455,
"children": {
"env_step": {
"total": 2005.542078685045,
"count": 232167,
"self": 1672.757015552945,
"children": {
"SubprocessEnvManager._take_step": {
"total": 329.71666865308475,
"count": 232167,
"self": 17.43242432013085,
"children": {
"TorchPolicy.evaluate": {
"total": 312.2842443329539,
"count": 223024,
"self": 77.2320422010664,
"children": {
"TorchPolicy.sample_actions": {
"total": 235.0522021318875,
"count": 223024,
"self": 235.0522021318875
}
}
}
}
},
"workers": {
"total": 3.0683944790152964,
"count": 232167,
"self": 0.0,
"children": {
"worker_root": {
"total": 2574.9174379957967,
"count": 232167,
"is_parallel": true,
"self": 1220.7829908347658,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009875540001758054,
"count": 1,
"is_parallel": true,
"self": 0.0003796850000981067,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006078690000776987,
"count": 2,
"is_parallel": true,
"self": 0.0006078690000776987
}
}
},
"UnityEnvironment.step": {
"total": 0.07387464399994315,
"count": 1,
"is_parallel": true,
"self": 0.00036667999961537134,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022085400019022927,
"count": 1,
"is_parallel": true,
"self": 0.00022085400019022927
},
"communicator.exchange": {
"total": 0.07083684300005189,
"count": 1,
"is_parallel": true,
"self": 0.07083684300005189
},
"steps_from_proto": {
"total": 0.0024502670000856597,
"count": 1,
"is_parallel": true,
"self": 0.0002941010000085953,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021561660000770644,
"count": 2,
"is_parallel": true,
"self": 0.0021561660000770644
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1354.134447161031,
"count": 232166,
"is_parallel": true,
"self": 40.21728005491741,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 86.32860143607763,
"count": 232166,
"is_parallel": true,
"self": 86.32860143607763
},
"communicator.exchange": {
"total": 1129.911195797001,
"count": 232166,
"is_parallel": true,
"self": 1129.911195797001
},
"steps_from_proto": {
"total": 97.67736987303488,
"count": 232166,
"is_parallel": true,
"self": 42.123816325133475,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.5535535479014,
"count": 464332,
"is_parallel": true,
"self": 55.5535535479014
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 559.2392560700314,
"count": 232167,
"self": 7.127967008024825,
"children": {
"process_trajectory": {
"total": 177.01906942600544,
"count": 232167,
"self": 175.85898210100504,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1600873250004042,
"count": 10,
"self": 1.1600873250004042
}
}
},
"_update_policy": {
"total": 375.0922196360011,
"count": 97,
"self": 315.57464162398946,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.517578012011654,
"count": 2910,
"self": 59.517578012011654
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.37000095291296e-07,
"count": 1,
"self": 9.37000095291296e-07
},
"TrainerController._save_models": {
"total": 0.11467288099993311,
"count": 1,
"self": 0.002163785999528045,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11250909500040507,
"count": 1,
"self": 0.11250909500040507
}
}
}
}
}
}
}
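
The file above is the ML-Agents timer log: "gauges" holds per-metric summaries (value / min / max / count), and the remaining top-level keys ("total", "count", "self", "children") form a hierarchical timer tree of where training time was spent. The following is a minimal, illustrative Python sketch (not part of the training run) for inspecting such a file; it assumes the JSON has been saved locally as "timers.json".

import json

def print_timers(node, name="root", depth=0):
    # Recursively print each timer's total seconds and call count.
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.3f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        print_timers(child, child_name, depth + 1)

with open("timers.json") as f:
    data = json.load(f)

# Gauges hold per-metric summaries (value / min / max / count).
for gauge, stats in data["gauges"].items():
    print(f"{gauge}: value={stats['value']:.4f} (min={stats['min']:.4f}, max={stats['max']:.4f})")

# The remaining top-level keys form the root of the timer tree.
print_timers(data)

Run against this log, the gauge loop would report, for example, Huggy.Environment.CumulativeReward.mean at about 3.55 over the last of 40 summary windows, and the timer walk would show that most of the 2584.7 s total sits under TrainerController.advance (env_step plus trainer_advance).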