{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4364183247089386,
"min": 0.4364183247089386,
"max": 1.3196464776992798,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12952.8955078125,
"min": 12952.8955078125,
"max": 40032.796875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989889.0,
"min": 29900.0,
"max": 989889.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989889.0,
"min": 29900.0,
"max": 989889.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.39364901185035706,
"min": -0.15100909769535065,
"max": 0.41996756196022034,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 106.28523254394531,
"min": -35.78915786743164,
"max": 113.39124298095703,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.029679453000426292,
"min": -0.029679453000426292,
"max": 0.35827627778053284,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -8.013452529907227,
"min": -8.013452529907227,
"max": 87.0611343383789,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0681851870380342,
"min": 0.06478606916956554,
"max": 0.07543013763343223,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.022777805570513,
"min": 0.5280109634340256,
"max": 1.071900576973955,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015913735126378014,
"min": 0.0008934209021942349,
"max": 0.018448189659726007,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23870602689567022,
"min": 0.011614471728525054,
"max": 0.2582746552361641,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.576577474506669e-06,
"min": 7.576577474506669e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011364866211760003,
"min": 0.00011364866211760003,
"max": 0.0035082497305834997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252549333333336,
"min": 0.10252549333333336,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5378824000000004,
"min": 1.3886848,
"max": 2.5694165000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026229678400000004,
"min": 0.00026229678400000004,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003934451760000001,
"min": 0.003934451760000001,
"max": 0.11696470835,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01663113385438919,
"min": 0.016229888424277306,
"max": 0.5719889998435974,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.24946701526641846,
"min": 0.22721843421459198,
"max": 4.003922939300537,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 444.4925373134328,
"min": 427.4714285714286,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29781.0,
"min": 16827.0,
"max": 33109.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4957850456460198,
"min": -0.9999200517932574,
"max": 1.4957850456460198,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 100.21759805828333,
"min": -29.997601553797722,
"max": 105.21799843013287,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4957850456460198,
"min": -0.9999200517932574,
"max": 1.4957850456460198,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 100.21759805828333,
"min": -29.997601553797722,
"max": 105.21799843013287,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.07591465595803822,
"min": 0.07591465595803822,
"max": 10.387660105438794,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.08628194918856,
"min": 5.08628194918856,
"max": 176.5902217924595,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689682832",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689685289"
},
"total": 2457.0226278630003,
"count": 1,
"self": 0.5260259930000757,
"children": {
"run_training.setup": {
"total": 0.04456316000005245,
"count": 1,
"self": 0.04456316000005245
},
"TrainerController.start_learning": {
"total": 2456.45203871,
"count": 1,
"self": 1.7596926980413627,
"children": {
"TrainerController._reset_env": {
"total": 4.856784271000151,
"count": 1,
"self": 4.856784271000151
},
"TrainerController.advance": {
"total": 2449.7368411139582,
"count": 63518,
"self": 1.848386117006612,
"children": {
"env_step": {
"total": 1735.1779525079828,
"count": 63518,
"self": 1600.275293484918,
"children": {
"SubprocessEnvManager._take_step": {
"total": 133.80329847807593,
"count": 63518,
"self": 5.4735015841133645,
"children": {
"TorchPolicy.evaluate": {
"total": 128.32979689396257,
"count": 62552,
"self": 128.32979689396257
}
}
},
"workers": {
"total": 1.09936054498894,
"count": 63518,
"self": 0.0,
"children": {
"worker_root": {
"total": 2450.566689787935,
"count": 63518,
"is_parallel": true,
"self": 985.1818190009344,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020937159999903088,
"count": 1,
"is_parallel": true,
"self": 0.0006232149999050307,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001470501000085278,
"count": 8,
"is_parallel": true,
"self": 0.001470501000085278
}
}
},
"UnityEnvironment.step": {
"total": 0.0833203100000901,
"count": 1,
"is_parallel": true,
"self": 0.0006282059998738987,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045959199997014366,
"count": 1,
"is_parallel": true,
"self": 0.00045959199997014366
},
"communicator.exchange": {
"total": 0.08028509700011455,
"count": 1,
"is_parallel": true,
"self": 0.08028509700011455
},
"steps_from_proto": {
"total": 0.0019474150001315138,
"count": 1,
"is_parallel": true,
"self": 0.00038080100034676434,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015666139997847495,
"count": 8,
"is_parallel": true,
"self": 0.0015666139997847495
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1465.3848707870006,
"count": 63517,
"is_parallel": true,
"self": 38.93920957700084,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.597454306067675,
"count": 63517,
"is_parallel": true,
"self": 25.597454306067675
},
"communicator.exchange": {
"total": 1279.9858864999517,
"count": 63517,
"is_parallel": true,
"self": 1279.9858864999517
},
"steps_from_proto": {
"total": 120.86232040398045,
"count": 63517,
"is_parallel": true,
"self": 23.86798548109732,
"children": {
"_process_rank_one_or_two_observation": {
"total": 96.99433492288313,
"count": 508136,
"is_parallel": true,
"self": 96.99433492288313
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 712.7105024889688,
"count": 63518,
"self": 3.267680451985825,
"children": {
"process_trajectory": {
"total": 120.27048939998394,
"count": 63518,
"self": 120.04189057598387,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22859882400007336,
"count": 2,
"self": 0.22859882400007336
}
}
},
"_update_policy": {
"total": 589.172332636999,
"count": 454,
"self": 381.09204814003147,
"children": {
"TorchPPOOptimizer.update": {
"total": 208.08028449696758,
"count": 22800,
"self": 208.08028449696758
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2210002751089633e-06,
"count": 1,
"self": 1.2210002751089633e-06
},
"TrainerController._save_models": {
"total": 0.09871940599987283,
"count": 1,
"self": 0.0014949119995435467,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09722449400032929,
"count": 1,
"self": 0.09722449400032929
}
}
}
}
}
}
}
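
A minimal sketch (not part of the recorded run log) of one way to inspect this file with only the Python standard library: it prints the final/min/max of every gauge above and then walks the nested timer tree to show where the wall-clock time went. The relative path "run_logs/timers.json" is an assumption; point it at wherever the file sits in your checkout.

import json

def walk(node, name="root", depth=0, parent_total=None):
    """Print each timer's total seconds, call count, and share of its parent."""
    total = node.get("total", 0.0)
    share = f" ({100 * total / parent_total:.1f}% of parent)" if parent_total else ""
    print(f"{'  ' * depth}{name}: {total:.2f}s over {node.get('count', 0)} call(s){share}")
    for child_name, child in node.get("children", {}).items():
        # Pass None when a node reports 0.0s so we never divide by zero.
        walk(child, child_name, depth + 1, total or None)

with open("run_logs/timers.json") as f:  # assumed path to this file
    timers = json.load(f)

# Gauges: last recorded value plus min/max over the run for each statistic.
for gauge, stats in timers["gauges"].items():
    print(f"{gauge}: last={stats['value']:.4g} min={stats['min']:.4g} max={stats['max']:.4g}")

# Timer tree: the root node carries the total wall-clock time of the run.
walk(timers)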