{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.31355521082878113,
"min": 0.31069883704185486,
"max": 1.470126748085022,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9406.65625,
"min": 9335.87890625,
"max": 44597.765625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989879.0,
"min": 29952.0,
"max": 989879.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989879.0,
"min": 29952.0,
"max": 989879.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6625138521194458,
"min": -0.08625052869319916,
"max": 0.6625138521194458,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 190.80398559570312,
"min": -20.786376953125,
"max": 190.80398559570312,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.023519672453403473,
"min": -0.0003497330762911588,
"max": 0.4878869354724884,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.773665428161621,
"min": -0.09617659449577332,
"max": 115.62920379638672,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07071380875727516,
"min": 0.06527877752501762,
"max": 0.07279266869036724,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9899933226018522,
"min": 0.5013061369009565,
"max": 1.0399303043765635,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0172879818502614,
"min": 0.00024956367645216735,
"max": 0.018370240206584437,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24203174590365958,
"min": 0.0032443277938781754,
"max": 0.27555360309876653,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.475647508149998e-06,
"min": 7.475647508149998e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010465906511409997,
"min": 0.00010465906511409997,
"max": 0.0033758758747080998,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249184999999998,
"min": 0.10249184999999998,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4348858999999998,
"min": 1.3886848,
"max": 2.4846014000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025893581499999996,
"min": 0.00025893581499999996,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036251014099999996,
"min": 0.0036251014099999996,
"max": 0.11254666080999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014173387549817562,
"min": 0.013540646992623806,
"max": 0.5382587909698486,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19842742383480072,
"min": 0.18956905603408813,
"max": 3.7678115367889404,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 275.27272727272725,
"min": 275.27272727272725,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30280.0,
"min": 15984.0,
"max": 33357.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6696161978148125,
"min": -1.0000000521540642,
"max": 1.6931235131387616,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 185.3273979574442,
"min": -31.994001641869545,
"max": 185.3273979574442,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6696161978148125,
"min": -1.0000000521540642,
"max": 1.6931235131387616,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 185.3273979574442,
"min": -31.994001641869545,
"max": 185.3273979574442,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03997452010970609,
"min": 0.03997452010970609,
"max": 11.351814521476626,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.437171732177376,
"min": 3.911135181755526,
"max": 181.62903234362602,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1736361374",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1736363959"
},
"total": 2584.4189311669998,
"count": 1,
"self": 0.53427792399998,
"children": {
"run_training.setup": {
"total": 0.059103026000229875,
"count": 1,
"self": 0.059103026000229875
},
"TrainerController.start_learning": {
"total": 2583.8255502169995,
"count": 1,
"self": 1.45935314187318,
"children": {
"TrainerController._reset_env": {
"total": 2.49851868199994,
"count": 1,
"self": 2.49851868199994
},
"TrainerController.advance": {
"total": 2579.7792310251266,
"count": 64060,
"self": 1.5040922263351604,
"children": {
"env_step": {
"total": 1813.65362438097,
"count": 64060,
"self": 1654.0351420987613,
"children": {
"SubprocessEnvManager._take_step": {
"total": 158.71886761307178,
"count": 64060,
"self": 4.973839838998629,
"children": {
"TorchPolicy.evaluate": {
"total": 153.74502777407315,
"count": 62571,
"self": 153.74502777407315
}
}
},
"workers": {
"total": 0.8996146691370086,
"count": 64060,
"self": 0.0,
"children": {
"worker_root": {
"total": 2578.206185923049,
"count": 64060,
"is_parallel": true,
"self": 1051.2227796260863,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023062710001795494,
"count": 1,
"is_parallel": true,
"self": 0.0007542900007138087,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015519809994657408,
"count": 8,
"is_parallel": true,
"self": 0.0015519809994657408
}
}
},
"UnityEnvironment.step": {
"total": 0.0518640619998223,
"count": 1,
"is_parallel": true,
"self": 0.0007437440003741358,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005223939997449634,
"count": 1,
"is_parallel": true,
"self": 0.0005223939997449634
},
"communicator.exchange": {
"total": 0.0485710759999165,
"count": 1,
"is_parallel": true,
"self": 0.0485710759999165
},
"steps_from_proto": {
"total": 0.002026847999786696,
"count": 1,
"is_parallel": true,
"self": 0.0004710150001301372,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001555832999656559,
"count": 8,
"is_parallel": true,
"self": 0.001555832999656559
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1526.9834062969626,
"count": 64059,
"is_parallel": true,
"self": 36.46983750492063,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.21280346103549,
"count": 64059,
"is_parallel": true,
"self": 24.21280346103549
},
"communicator.exchange": {
"total": 1358.5405101849965,
"count": 64059,
"is_parallel": true,
"self": 1358.5405101849965
},
"steps_from_proto": {
"total": 107.76025514600997,
"count": 64059,
"is_parallel": true,
"self": 22.100596488935025,
"children": {
"_process_rank_one_or_two_observation": {
"total": 85.65965865707494,
"count": 512472,
"is_parallel": true,
"self": 85.65965865707494
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 764.6215144178213,
"count": 64060,
"self": 2.850045881843471,
"children": {
"process_trajectory": {
"total": 144.41269349397044,
"count": 64060,
"self": 144.1473300929706,
"children": {
"RLTrainer._checkpoint": {
"total": 0.26536340099983136,
"count": 2,
"self": 0.26536340099983136
}
}
},
"_update_policy": {
"total": 617.3587750420074,
"count": 449,
"self": 350.48584335497026,
"children": {
"TorchPPOOptimizer.update": {
"total": 266.8729316870372,
"count": 22836,
"self": 266.8729316870372
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.69999746303074e-07,
"count": 1,
"self": 9.69999746303074e-07
},
"TrainerController._save_models": {
"total": 0.08844639800008736,
"count": 1,
"self": 0.00160165000033885,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0868447479997485,
"count": 1,
"self": 0.0868447479997485
}
}
}
}
}
}
}