{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0101369619369507,
"min": 0.9661122560501099,
"max": 2.8539223670959473,
"count": 36
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9611.453125,
"min": 9203.185546875,
"max": 29289.8046875,
"count": 36
},
"SnowballTarget.Step.mean": {
"value": 359944.0,
"min": 9952.0,
"max": 359944.0,
"count": 36
},
"SnowballTarget.Step.sum": {
"value": 359944.0,
"min": 9952.0,
"max": 359944.0,
"count": 36
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.621232032775879,
"min": 0.26785075664520264,
"max": 13.718734741210938,
"count": 36
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2642.51904296875,
"min": 51.96304702758789,
"max": 2797.57080078125,
"count": 36
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 36
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 36
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06606421664818139,
"min": 0.06405488552837221,
"max": 0.0726014357254505,
"count": 36
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26425686659272557,
"min": 0.25621954211348885,
"max": 0.360864134418422,
"count": 36
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.15996370992678052,
"min": 0.13179133047746933,
"max": 0.2952324056888328,
"count": 36
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6398548397071221,
"min": 0.5271653219098773,
"max": 1.3253511991570976,
"count": 36
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 0.00016679554440150003,
"min": 0.00016679554440150003,
"max": 0.0002979705006765,
"count": 36
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0006671821776060001,
"min": 0.0006671821776060001,
"max": 0.00147129000957,
"count": 36
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.18339775000000003,
"min": 0.18339775000000003,
"max": 0.24898524999999996,
"count": 36
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.7335910000000001,
"min": 0.7335910000000001,
"max": 1.235645,
"count": 36
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0044523201499999995,
"min": 0.0044523201499999995,
"max": 0.007945947650000002,
"count": 36
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.017809280599999998,
"min": 0.017809280599999998,
"max": 0.039235357,
"count": 36
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.09090909090909,
"min": 3.590909090909091,
"max": 27.163636363636364,
"count": 36
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1192.0,
"min": 158.0,
"max": 1494.0,
"count": 36
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.09090909090909,
"min": 3.590909090909091,
"max": 27.163636363636364,
"count": 36
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1192.0,
"min": 158.0,
"max": 1494.0,
"count": 36
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 36
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 36
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684697981",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684698913"
},
"total": 932.4281591630001,
"count": 1,
"self": 0.31070770199994513,
"children": {
"run_training.setup": {
"total": 0.06478223100020841,
"count": 1,
"self": 0.06478223100020841
},
"TrainerController.start_learning": {
"total": 932.05266923,
"count": 1,
"self": 1.0368155170322098,
"children": {
"TrainerController._reset_env": {
"total": 3.967126745999849,
"count": 1,
"self": 3.967126745999849
},
"TrainerController.advance": {
"total": 926.6245281449678,
"count": 33093,
"self": 0.48277396097887504,
"children": {
"env_step": {
"total": 926.141754183989,
"count": 33093,
"self": 703.7906541380517,
"children": {
"SubprocessEnvManager._take_step": {
"total": 221.8476351249833,
"count": 33093,
"self": 3.1763657929961937,
"children": {
"TorchPolicy.evaluate": {
"total": 218.6712693319871,
"count": 33093,
"self": 218.6712693319871
}
}
},
"workers": {
"total": 0.5034649209540021,
"count": 33092,
"self": 0.0,
"children": {
"worker_root": {
"total": 928.919061107986,
"count": 33092,
"is_parallel": true,
"self": 493.1845054869798,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00511526600007528,
"count": 1,
"is_parallel": true,
"self": 0.0038045469996177417,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013107190004575386,
"count": 10,
"is_parallel": true,
"self": 0.0013107190004575386
}
}
},
"UnityEnvironment.step": {
"total": 0.03460879600015687,
"count": 1,
"is_parallel": true,
"self": 0.00055484500012426,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000361246000011306,
"count": 1,
"is_parallel": true,
"self": 0.000361246000011306
},
"communicator.exchange": {
"total": 0.031696307999936835,
"count": 1,
"is_parallel": true,
"self": 0.031696307999936835
},
"steps_from_proto": {
"total": 0.001996397000084471,
"count": 1,
"is_parallel": true,
"self": 0.0004000610006187344,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015963359994657367,
"count": 10,
"is_parallel": true,
"self": 0.0015963359994657367
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 435.73455562100617,
"count": 33091,
"is_parallel": true,
"self": 17.027239797940865,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 9.33641341601242,
"count": 33091,
"is_parallel": true,
"self": 9.33641341601242
},
"communicator.exchange": {
"total": 351.5178492520083,
"count": 33091,
"is_parallel": true,
"self": 351.5178492520083
},
"steps_from_proto": {
"total": 57.85305315504456,
"count": 33091,
"is_parallel": true,
"self": 11.353279621086358,
"children": {
"_process_rank_one_or_two_observation": {
"total": 46.499773533958205,
"count": 330910,
"is_parallel": true,
"self": 46.499773533958205
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.9622999995335704e-05,
"count": 1,
"self": 4.9622999995335704e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 920.4740858690102,
"count": 791814,
"is_parallel": true,
"self": 16.941190049013358,
"children": {
"process_trajectory": {
"total": 439.32065475200034,
"count": 791814,
"is_parallel": true,
"self": 437.94801249200054,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3726422599997932,
"count": 7,
"is_parallel": true,
"self": 1.3726422599997932
}
}
},
"_update_policy": {
"total": 464.2122410679965,
"count": 165,
"is_parallel": true,
"self": 187.49770782199175,
"children": {
"TorchPPOOptimizer.update": {
"total": 276.71453324600475,
"count": 14020,
"is_parallel": true,
"self": 276.71453324600475
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.4241491990001123,
"count": 1,
"self": 0.001238764999925479,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4229104340001868,
"count": 1,
"self": 0.4229104340001868
}
}
}
}
}
}
}