{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.47236329317092896,
"min": 0.458514928817749,
"max": 1.4433969259262085,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14170.8984375,
"min": 13740.775390625,
"max": 43786.890625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989920.0,
"min": 29952.0,
"max": 989920.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989920.0,
"min": 29952.0,
"max": 989920.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.36124733090400696,
"min": -0.09892983734607697,
"max": 0.48307329416275024,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 95.36929321289062,
"min": -23.842090606689453,
"max": 131.39593505859375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.042085569351911545,
"min": -0.012892386876046658,
"max": 0.25710371136665344,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 11.110589981079102,
"min": -3.2102043628692627,
"max": 61.70488739013672,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06801222333380808,
"min": 0.06588454707387081,
"max": 0.07389297727172374,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.952171126673313,
"min": 0.49904678346175735,
"max": 1.0642044619501878,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01306504421921725,
"min": 0.0005778326679845515,
"max": 0.01478731196749279,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1829106190690415,
"min": 0.0072803072817470985,
"max": 0.20702236754489906,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.53245463207143e-06,
"min": 7.53245463207143e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010545436484900002,
"min": 0.00010545436484900002,
"max": 0.0035082539305821,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251078571428573,
"min": 0.10251078571428573,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4351510000000003,
"min": 1.3691136000000002,
"max": 2.5694179,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002608274928571429,
"min": 0.0002608274928571429,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036515849000000006,
"min": 0.0036515849000000006,
"max": 0.11696484820999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0095343217253685,
"min": 0.009474159218370914,
"max": 0.4751955568790436,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.133480504155159,
"min": 0.13263823091983795,
"max": 3.326368808746338,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 452.2985074626866,
"min": 372.3703703703704,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30304.0,
"min": 15984.0,
"max": 33547.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.2491313183485573,
"min": -1.0000000521540642,
"max": 1.5025874801911414,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 83.69179832935333,
"min": -32.000001668930054,
"max": 120.20699841529131,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.2491313183485573,
"min": -1.0000000521540642,
"max": 1.5025874801911414,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 83.69179832935333,
"min": -32.000001668930054,
"max": 120.20699841529131,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04458860680765,
"min": 0.03814346618655691,
"max": 10.327977978624403,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.98743665611255,
"min": 2.98743665611255,
"max": 165.24764765799046,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1707230779",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1707232928"
},
"total": 2149.007663271,
"count": 1,
"self": 0.4754602240004715,
"children": {
"run_training.setup": {
"total": 0.05280319700000291,
"count": 1,
"self": 0.05280319700000291
},
"TrainerController.start_learning": {
"total": 2148.47939985,
"count": 1,
"self": 1.4350300560631695,
"children": {
"TrainerController._reset_env": {
"total": 2.260260745999858,
"count": 1,
"self": 2.260260745999858
},
"TrainerController.advance": {
"total": 2144.7024454939365,
"count": 63584,
"self": 1.504322958908233,
"children": {
"env_step": {
"total": 1508.8654234179921,
"count": 63584,
"self": 1372.0094006500444,
"children": {
"SubprocessEnvManager._take_step": {
"total": 135.94531190199837,
"count": 63584,
"self": 4.9240982810104015,
"children": {
"TorchPolicy.evaluate": {
"total": 131.02121362098796,
"count": 62560,
"self": 131.02121362098796
}
}
},
"workers": {
"total": 0.9107108659493406,
"count": 63584,
"self": 0.0,
"children": {
"worker_root": {
"total": 2143.132666595949,
"count": 63584,
"is_parallel": true,
"self": 894.5170995329568,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021812039999531407,
"count": 1,
"is_parallel": true,
"self": 0.0007308100000500417,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001450393999903099,
"count": 8,
"is_parallel": true,
"self": 0.001450393999903099
}
}
},
"UnityEnvironment.step": {
"total": 0.06653976599977796,
"count": 1,
"is_parallel": true,
"self": 0.0007478269997136522,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005077109999547247,
"count": 1,
"is_parallel": true,
"self": 0.0005077109999547247
},
"communicator.exchange": {
"total": 0.0561459420000574,
"count": 1,
"is_parallel": true,
"self": 0.0561459420000574
},
"steps_from_proto": {
"total": 0.009138286000052176,
"count": 1,
"is_parallel": true,
"self": 0.007717351999644961,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014209340004072146,
"count": 8,
"is_parallel": true,
"self": 0.0014209340004072146
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1248.6155670629923,
"count": 63583,
"is_parallel": true,
"self": 35.372986165820066,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.497567582085594,
"count": 63583,
"is_parallel": true,
"self": 25.497567582085594
},
"communicator.exchange": {
"total": 1083.9063331300792,
"count": 63583,
"is_parallel": true,
"self": 1083.9063331300792
},
"steps_from_proto": {
"total": 103.83868018500743,
"count": 63583,
"is_parallel": true,
"self": 21.106822948028366,
"children": {
"_process_rank_one_or_two_observation": {
"total": 82.73185723697907,
"count": 508664,
"is_parallel": true,
"self": 82.73185723697907
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 634.3326991170359,
"count": 63584,
"self": 2.726089176970845,
"children": {
"process_trajectory": {
"total": 128.34972294206045,
"count": 63584,
"self": 128.15297425206063,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1967486899998221,
"count": 2,
"self": 0.1967486899998221
}
}
},
"_update_policy": {
"total": 503.2568869980046,
"count": 447,
"self": 295.7731463640348,
"children": {
"TorchPPOOptimizer.update": {
"total": 207.48374063396977,
"count": 22830,
"self": 207.48374063396977
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.61000296229031e-07,
"count": 1,
"self": 9.61000296229031e-07
},
"TrainerController._save_models": {
"total": 0.08166259300014644,
"count": 1,
"self": 0.0013443729999380594,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08031822000020838,
"count": 1,
"self": 0.08031822000020838
}
}
}
}
}
}
}