ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4128448963165283,
"min": 1.4128448963165283,
"max": 1.432537317276001,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70416.1875,
"min": 69564.625,
"max": 76528.421875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 83.47038917089678,
"min": 80.30243902439024,
"max": 390.5,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49331.0,
"min": 48830.0,
"max": 49984.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999265.0,
"min": 49625.0,
"max": 1999265.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999265.0,
"min": 49625.0,
"max": 1999265.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.395709991455078,
"min": 0.10957663506269455,
"max": 2.5216548442840576,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1415.8646240234375,
"min": 13.91623306274414,
"max": 1528.158203125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6229232034304015,
"min": 1.7737986861955461,
"max": 3.931097707035495,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2141.1476132273674,
"min": 225.27243314683437,
"max": 2340.871709227562,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6229232034304015,
"min": 1.7737986861955461,
"max": 3.931097707035495,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2141.1476132273674,
"min": 225.27243314683437,
"max": 2340.871709227562,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01667153416828013,
"min": 0.013518326311653558,
"max": 0.020769643696257847,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.0500146025048404,
"min": 0.027036652623307116,
"max": 0.057175489795918105,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05406608457366626,
"min": 0.021767253739138445,
"max": 0.06167548950761556,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16219825372099878,
"min": 0.04353450747827689,
"max": 0.1707668344179789,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.7487987504333267e-06,
"min": 3.7487987504333267e-06,
"max": 0.00029538667653777493,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.124639625129998e-05,
"min": 1.124639625129998e-05,
"max": 0.0008442336185888001,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10124956666666667,
"min": 0.10124956666666667,
"max": 0.19846222500000005,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3037487,
"min": 0.20763195,
"max": 0.5814112000000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.235337666666656e-05,
"min": 7.235337666666656e-05,
"max": 0.0049232650275,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021706012999999966,
"min": 0.00021706012999999966,
"max": 0.01407241888,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739893569",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739895952"
},
"total": 2383.133621902,
"count": 1,
"self": 0.4433116040004279,
"children": {
"run_training.setup": {
"total": 0.02239850499995555,
"count": 1,
"self": 0.02239850499995555
},
"TrainerController.start_learning": {
"total": 2382.667911793,
"count": 1,
"self": 4.323136984991379,
"children": {
"TrainerController._reset_env": {
"total": 3.2819625570000426,
"count": 1,
"self": 3.2819625570000426
},
"TrainerController.advance": {
"total": 2374.9520744850083,
"count": 232650,
"self": 4.663443821148121,
"children": {
"env_step": {
"total": 1871.8163257239983,
"count": 232650,
"self": 1469.1108960377599,
"children": {
"SubprocessEnvManager._take_step": {
"total": 400.1434886711395,
"count": 232650,
"self": 15.293371156229114,
"children": {
"TorchPolicy.evaluate": {
"total": 384.8501175149104,
"count": 222967,
"self": 384.8501175149104
}
}
},
"workers": {
"total": 2.5619410150989097,
"count": 232650,
"self": 0.0,
"children": {
"worker_root": {
"total": 2375.7362971778603,
"count": 232650,
"is_parallel": true,
"self": 1176.1696129238774,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010650169999735226,
"count": 1,
"is_parallel": true,
"self": 0.00034418400014146755,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007208329998320551,
"count": 2,
"is_parallel": true,
"self": 0.0007208329998320551
}
}
},
"UnityEnvironment.step": {
"total": 0.05040084099982778,
"count": 1,
"is_parallel": true,
"self": 0.0002911060000769794,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001847879998422286,
"count": 1,
"is_parallel": true,
"self": 0.0001847879998422286
},
"communicator.exchange": {
"total": 0.04928504500003328,
"count": 1,
"is_parallel": true,
"self": 0.04928504500003328
},
"steps_from_proto": {
"total": 0.000639901999875292,
"count": 1,
"is_parallel": true,
"self": 0.00017784299984668905,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004620590000286029,
"count": 2,
"is_parallel": true,
"self": 0.0004620590000286029
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1199.5666842539829,
"count": 232649,
"is_parallel": true,
"self": 36.332004332888346,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 78.51164616803999,
"count": 232649,
"is_parallel": true,
"self": 78.51164616803999
},
"communicator.exchange": {
"total": 999.189465496142,
"count": 232649,
"is_parallel": true,
"self": 999.189465496142
},
"steps_from_proto": {
"total": 85.53356825691253,
"count": 232649,
"is_parallel": true,
"self": 30.420015526882025,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.113552730030506,
"count": 465298,
"is_parallel": true,
"self": 55.113552730030506
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 498.47230493986194,
"count": 232650,
"self": 6.1486264379420845,
"children": {
"process_trajectory": {
"total": 161.15218068991908,
"count": 232650,
"self": 159.81206254691983,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3401181429992448,
"count": 10,
"self": 1.3401181429992448
}
}
},
"_update_policy": {
"total": 331.1714978120008,
"count": 97,
"self": 264.9078571840057,
"children": {
"TorchPPOOptimizer.update": {
"total": 66.26364062799507,
"count": 2910,
"self": 66.26364062799507
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0250000741507392e-06,
"count": 1,
"self": 1.0250000741507392e-06
},
"TrainerController._save_models": {
"total": 0.11073674099998243,
"count": 1,
"self": 0.001874418000170408,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10886232299981202,
"count": 1,
"self": 0.10886232299981202
}
}
}
}
}
}
}
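
A minimal sketch (not part of the run log itself) of how one might load this timers.json and summarize it with Python's standard json module; the relative file path used here is an assumption.

# Sketch: summarize the ML-Agents gauges recorded in this timers.json.
# Assumes the file sits at run_logs/timers.json relative to the working directory.
import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores the latest value plus min/max over the run and a sample count.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"min={gauge['min']:.4f} max={gauge['max']:.4f} count={gauge['count']}")

# Wall-clock totals come from the timer tree rooted at the top level.
print("total seconds:", timers["total"])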