{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.37492474913597107,
"min": 0.36654171347618103,
"max": 1.472686529159546,
"count": 34
},
"Pyramids.Policy.Entropy.sum": {
"value": 11295.732421875,
"min": 10978.6572265625,
"max": 44675.41796875,
"count": 34
},
"Pyramids.Step.mean": {
"value": 1019999.0,
"min": 29952.0,
"max": 1019999.0,
"count": 34
},
"Pyramids.Step.sum": {
"value": 1019999.0,
"min": 29952.0,
"max": 1019999.0,
"count": 34
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.03032447025179863,
"min": -0.20310690999031067,
"max": -0.03032447025179863,
"count": 34
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -7.429495334625244,
"min": -48.13633728027344,
"max": -7.429495334625244,
"count": 34
},
"Pyramids.Policy.CuriosityValueEstimate.mean": {
"value": 0.1564064770936966,
"min": 0.1564064770936966,
"max": 0.44639909267425537,
"count": 34
},
"Pyramids.Policy.CuriosityValueEstimate.sum": {
"value": 38.31958770751953,
"min": 38.210716247558594,
"max": 108.02857971191406,
"count": 34
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06718603229835869,
"min": 0.06478804335382495,
"max": 0.07394896972318263,
"count": 34
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9406044521770217,
"min": 0.49618800863758766,
"max": 1.0529568219661836,
"count": 34
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.002840339800961007,
"min": 0.00012460844201482606,
"max": 0.004310364597613544,
"count": 34
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.0397647572134541,
"min": 0.0017445181882075648,
"max": 0.06034510436658962,
"count": 34
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.2261868068504335e-05,
"min": 1.2261868068504335e-05,
"max": 0.0002953752805594308,
"count": 34
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00017166615295906068,
"min": 0.00017166615295906068,
"max": 0.0036590319529362673,
"count": 34
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10408725738525391,
"min": 0.10408725738525391,
"max": 0.19845842633928573,
"count": 34
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4572216033935548,
"min": 1.3892089843750002,
"max": 2.6196772575378415,
"count": 34
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00041831701278686524,
"min": 0.00041831701278686524,
"max": 0.009845996791294642,
"count": 34
},
"Pyramids.Policy.Beta.sum": {
"value": 0.005856438179016113,
"min": 0.005856438179016113,
"max": 0.12198575802803038,
"count": 34
},
"Pyramids.Losses.CuriosityForwardLoss.mean": {
"value": 0.07406188456076836,
"min": 0.07187406965550976,
"max": 0.41989175825378267,
"count": 34
},
"Pyramids.Losses.CuriosityForwardLoss.sum": {
"value": 1.036866383850757,
"min": 0.9707603916233661,
"max": 2.9392423077764787,
"count": 34
},
"Pyramids.Losses.CuriosityInverseLoss.mean": {
"value": 0.06697648263649547,
"min": 0.06312481397228102,
"max": 0.7277615498644353,
"count": 34
},
"Pyramids.Losses.CuriosityInverseLoss.sum": {
"value": 0.9376707569109366,
"min": 0.8529724184858529,
"max": 5.0943308490510475,
"count": 34
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 899.060606060606,
"min": 881.741935483871,
"max": 999.0,
"count": 34
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29669.0,
"min": 15984.0,
"max": 33110.0,
"count": 34
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.4147697470404885,
"min": -1.0000000521540642,
"max": -0.23707746089466156,
"count": 34
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -13.68740165233612,
"min": -31.99360166490078,
"max": -7.3494012877345085,
"count": 34
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.4147697470404885,
"min": -1.0000000521540642,
"max": -0.23707746089466156,
"count": 34
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -13.68740165233612,
"min": -31.99360166490078,
"max": -7.3494012877345085,
"count": 34
},
"Pyramids.Policy.CuriosityReward.mean": {
"value": 1.3701743991537527,
"min": 1.3701743991537527,
"max": 6.27729257196188,
"count": 34
},
"Pyramids.Policy.CuriosityReward.sum": {
"value": 45.21575517207384,
"min": 45.21575517207384,
"max": 174.45192439109087,
"count": 34
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 34
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 34
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1738524599",
"python_version": "3.10.16 (main, Dec 4 2024, 08:53:38) [GCC 13.2.0]",
"command_line_arguments": "/home/hrichter/projects/hf_rl_course/.venv/bin/mlagents-learn ml-agents/config/ppo/Pyramids.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1738525559"
},
"total": 960.69417054,
"count": 1,
"self": 0.21820811000020512,
"children": {
"run_training.setup": {
"total": 0.014896433999865621,
"count": 1,
"self": 0.014896433999865621
},
"TrainerController.start_learning": {
"total": 960.4610659959999,
"count": 1,
"self": 0.6438697869684802,
"children": {
"TrainerController._reset_env": {
"total": 1.2939183309999862,
"count": 1,
"self": 1.2939183309999862
},
"TrainerController.advance": {
"total": 958.4810456990313,
"count": 66200,
"self": 0.6672122379852681,
"children": {
"env_step": {
"total": 516.2186331370485,
"count": 66200,
"self": 472.8256553351712,
"children": {
"SubprocessEnvManager._take_step": {
"total": 42.98896845993613,
"count": 66200,
"self": 2.0265824469049676,
"children": {
"TorchPolicy.evaluate": {
"total": 40.962386013031164,
"count": 65598,
"self": 40.962386013031164
}
}
},
"workers": {
"total": 0.40400934194121874,
"count": 66200,
"self": 0.0,
"children": {
"worker_root": {
"total": 959.4559383770147,
"count": 66200,
"is_parallel": true,
"self": 533.8106512569464,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009145409999291587,
"count": 1,
"is_parallel": true,
"self": 0.00022774500007471943,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006867959998544393,
"count": 8,
"is_parallel": true,
"self": 0.0006867959998544393
}
}
},
"UnityEnvironment.step": {
"total": 0.018782304000069416,
"count": 1,
"is_parallel": true,
"self": 0.0003025040000466106,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002825439999014634,
"count": 1,
"is_parallel": true,
"self": 0.0002825439999014634
},
"communicator.exchange": {
"total": 0.01733245400009764,
"count": 1,
"is_parallel": true,
"self": 0.01733245400009764
},
"steps_from_proto": {
"total": 0.0008648020000237011,
"count": 1,
"is_parallel": true,
"self": 0.0001828549998208473,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006819470002028538,
"count": 8,
"is_parallel": true,
"self": 0.0006819470002028538
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 425.64528712006836,
"count": 66199,
"is_parallel": true,
"self": 11.960250902025791,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.491987588990696,
"count": 66199,
"is_parallel": true,
"self": 8.491987588990696
},
"communicator.exchange": {
"total": 370.419048640038,
"count": 66199,
"is_parallel": true,
"self": 370.419048640038
},
"steps_from_proto": {
"total": 34.77399998901387,
"count": 66199,
"is_parallel": true,
"self": 7.488494236913766,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.285505752100107,
"count": 529592,
"is_parallel": true,
"self": 27.285505752100107
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 441.59520032399746,
"count": 66200,
"self": 1.3063789900720622,
"children": {
"process_trajectory": {
"total": 63.53832344492298,
"count": 66200,
"self": 63.442949675923046,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09537376899993433,
"count": 2,
"self": 0.09537376899993433
}
}
},
"_update_policy": {
"total": 376.7504978890024,
"count": 469,
"self": 251.99174821898123,
"children": {
"TorchPPOOptimizer.update": {
"total": 124.75874967002119,
"count": 23895,
"self": 124.75874967002119
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.300000106624793e-07,
"count": 1,
"self": 7.300000106624793e-07
},
"TrainerController._save_models": {
"total": 0.04223144900015541,
"count": 1,
"self": 0.0018124279999938153,
"children": {
"RLTrainer._checkpoint": {
"total": 0.04041902100016159,
"count": 1,
"self": 0.04041902100016159
}
}
}
}
}
}
}