{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.7879307866096497, "min": 0.7798956036567688, "max": 1.4863524436950684, "count": 16 }, "Pyramids.Policy.Entropy.sum": { "value": 23411.0, "min": 23411.0, "max": 45089.98828125, "count": 16 }, "Pyramids.Step.mean": { "value": 479977.0, "min": 29939.0, "max": 479977.0, "count": 16 }, "Pyramids.Step.sum": { "value": 479977.0, "min": 29939.0, "max": 479977.0, "count": 16 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.30129289627075195, "min": -0.11709693819284439, "max": 0.30129289627075195, "count": 16 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 77.1309814453125, "min": -27.75197410583496, "max": 77.1309814453125, "count": 16 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.02765870839357376, "min": 0.02012721635401249, "max": 0.3780697286128998, "count": 16 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 7.080629348754883, "min": 5.051931381225586, "max": 89.60252380371094, "count": 16 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.06748696474489312, "min": 0.0664296471239074, "max": 0.07312290836374974, "count": 16 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 1.0123044711733968, "min": 0.6581061752737477, "max": 1.0123044711733968, "count": 16 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.011431089166462044, "min": 0.0007473888176604587, "max": 0.011511775219959487, "count": 16 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.17146633749693066, "min": 0.009716054629585963, "max": 0.17146633749693066, "count": 16 }, "Pyramids.Policy.LearningRate.mean": { "value": 2.1010732996453333e-05, "min": 2.1010732996453333e-05, "max": 0.0002897527367490888, "count": 16 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.0003151609949468, "min": 0.0003151609949468, "max": 0.0033170515943161997, "count": 16 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10700354666666666, "min": 0.10700354666666666, "max": 0.19658424444444444, "count": 16 }, 
"Pyramids.Policy.Epsilon.sum": { "value": 1.6050532, "min": 1.5846114, "max": 2.4056838000000003, "count": 16 }, "Pyramids.Policy.Beta.mean": { "value": 0.0007096543119999999, "min": 0.0007096543119999999, "max": 0.009658766019999999, "count": 16 }, "Pyramids.Policy.Beta.sum": { "value": 0.01064481468, "min": 0.01064481468, "max": 0.11058781162, "count": 16 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.01675165630877018, "min": 0.01675165630877018, "max": 0.3614523708820343, "count": 16 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.25127485394477844, "min": 0.25127485394477844, "max": 3.2530713081359863, "count": 16 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 602.3921568627451, "min": 555.48, "max": 990.25, "count": 16 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 30722.0, "min": 16610.0, "max": 33407.0, "count": 16 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 0.9661332979506138, "min": -0.9286187971010804, "max": 1.0827842885080505, "count": 16 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 49.2727981954813, "min": -30.240801632404327, "max": 55.22199871391058, "count": 16 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 0.9661332979506138, "min": -0.9286187971010804, "max": 1.0827842885080505, "count": 16 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 49.2727981954813, "min": -30.240801632404327, "max": 55.22199871391058, "count": 16 }, "Pyramids.Policy.RndReward.mean": { "value": 0.10569652314434815, "min": 0.10540710396804463, "max": 6.297938686959884, "count": 16 }, "Pyramids.Policy.RndReward.sum": { "value": 5.390522680361755, "min": 5.375762302370276, "max": 107.06495767831802, "count": 16 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 16 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 16 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1679241732", "python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1679242795" }, "total": 1063.623661226, "count": 1, "self": 0.922740370999918, "children": { "run_training.setup": { "total": 0.2208115099999759, "count": 1, "self": 0.2208115099999759 }, "TrainerController.start_learning": { "total": 1062.480109345, "count": 1, "self": 0.7289624619922961, "children": { "TrainerController._reset_env": { "total": 6.6819898849998935, "count": 1, "self": 6.6819898849998935 }, "TrainerController.advance": { "total": 1054.930506734008, "count": 31705, "self": 0.7857568269714648, "children": { "env_step": { "total": 742.7545380100102, "count": 31705, "self": 684.428435121029, "children": { "SubprocessEnvManager._take_step": { "total": 57.863943221003865, "count": 31705, "self": 2.4792560059992184, "children": { "TorchPolicy.evaluate": { "total": 55.38468721500465, "count": 31325, "self": 55.38468721500465 } } }, "workers": { "total": 0.46215966797728925, "count": 31705, "self": 0.0, "children": { "worker_root": { "total": 1059.9195409480283, "count": 31705, "is_parallel": true, "self": 435.8469882600233, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.002547735999996803, "count": 1, "is_parallel": true, "self": 0.0007556589997648189, "children": { "_process_rank_one_or_two_observation": { "total": 0.0017920770002319841, "count": 8, "is_parallel": true, "self": 0.0017920770002319841 } } }, "UnityEnvironment.step": { "total": 0.04570040499993411, "count": 1, "is_parallel": true, "self": 0.0005166009998447407, "children": { 
"UnityEnvironment._generate_step_input": { "total": 0.000470125999981974, "count": 1, "is_parallel": true, "self": 0.000470125999981974 }, "communicator.exchange": { "total": 0.043151348000037615, "count": 1, "is_parallel": true, "self": 0.043151348000037615 }, "steps_from_proto": { "total": 0.0015623300000697782, "count": 1, "is_parallel": true, "self": 0.0003440160002128323, "children": { "_process_rank_one_or_two_observation": { "total": 0.0012183139998569459, "count": 8, "is_parallel": true, "self": 0.0012183139998569459 } } } } } } }, "UnityEnvironment.step": { "total": 624.072552688005, "count": 31704, "is_parallel": true, "self": 15.841444078962013, "children": { "UnityEnvironment._generate_step_input": { "total": 11.74328116402512, "count": 31704, "is_parallel": true, "self": 11.74328116402512 }, "communicator.exchange": { "total": 549.7589364910261, "count": 31704, "is_parallel": true, "self": 549.7589364910261 }, "steps_from_proto": { "total": 46.72889095399171, "count": 31704, "is_parallel": true, "self": 10.117620485966881, "children": { "_process_rank_one_or_two_observation": { "total": 36.61127046802483, "count": 253632, "is_parallel": true, "self": 36.61127046802483 } } } } } } } } } } }, "trainer_advance": { "total": 311.39021189702646, "count": 31705, "self": 1.3202999310308314, "children": { "process_trajectory": { "total": 57.90685148799673, "count": 31705, "self": 57.74754467999662, "children": { "RLTrainer._checkpoint": { "total": 0.15930680800011032, "count": 1, "self": 0.15930680800011032 } } }, "_update_policy": { "total": 252.1630604779989, "count": 221, "self": 160.73096351797892, "children": { "TorchPPOOptimizer.update": { "total": 91.43209696001998, "count": 11379, "self": 91.43209696001998 } } } } } } }, "trainer_threads": { "total": 1.2629998309421353e-06, "count": 1, "self": 1.2629998309421353e-06 }, "TrainerController._save_models": { "total": 0.13864900099997612, "count": 1, "self": 0.0018968489998769655, "children": { 
"RLTrainer._checkpoint": { "total": 0.13675215200009916, "count": 1, "self": 0.13675215200009916 } } } } } } }