{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": -3.576278402306343e-07,
"min": -3.576278402306343e-07,
"max": 3.2945072650909424,
"count": 3825
},
"SoccerTwos.Policy.Entropy.sum": {
"value": -0.00781631376594305,
"min": -0.019557951018214226,
"max": 167398.328125,
"count": 3825
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 720.1428571428571,
"min": 285.06666666666666,
"max": 999.0,
"count": 3825
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20164.0,
"min": 3660.0,
"max": 28808.0,
"count": 3825
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1569.9423849064597,
"min": 1530.1366432078805,
"max": 1635.3633316190458,
"count": 3074
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 6279.769539625839,
"min": 3060.273286415761,
"max": 27767.161065008182,
"count": 3074
},
"SoccerTwos.Step.mean": {
"value": 49999654.0,
"min": 11759873.0,
"max": 49999654.0,
"count": 3825
},
"SoccerTwos.Step.sum": {
"value": 49999654.0,
"min": 11759873.0,
"max": 49999654.0,
"count": 3825
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.009638502262532711,
"min": -0.026710689067840576,
"max": 0.011438295245170593,
"count": 3825
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.1349390298128128,
"min": -0.47578129172325134,
"max": 0.2820509672164917,
"count": 3825
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.009638502262532711,
"min": -0.026710689067840576,
"max": 0.011438295245170593,
"count": 3825
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.1349390298128128,
"min": -0.47578129172325134,
"max": 0.2820509672164917,
"count": 3825
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 3825
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 3825
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.002171431268964495,
"min": -0.625,
"max": 0.45912940361920523,
"count": 3825
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -0.03040003776550293,
"min": -12.0,
"max": 9.62279999256134,
"count": 3825
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.002171431268964495,
"min": -0.625,
"max": 0.45912940361920523,
"count": 3825
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -0.03040003776550293,
"min": -12.0,
"max": 9.62279999256134,
"count": 3825
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3825
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3825
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.005191320995800197,
"min": 0.0025595865241484716,
"max": 0.0512155355347204,
"count": 1754
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.005191320995800197,
"min": 0.0025595865241484716,
"max": 0.0512155355347204,
"count": 1754
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.004174860217608512,
"min": 1.987874747866325e-08,
"max": 0.009012238495051861,
"count": 1754
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.004174860217608512,
"min": 1.987874747866325e-08,
"max": 0.009012238495051861,
"count": 1754
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.004174860217608512,
"min": 1.987874747866325e-08,
"max": 0.009012238495051861,
"count": 1754
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.004174860217608512,
"min": 1.987874747866325e-08,
"max": 0.009012238495051861,
"count": 1754
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 1754
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 1754
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.2,
"min": 0.2,
"max": 0.2,
"count": 1754
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.2,
"min": 0.2,
"max": 0.2,
"count": 1754
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.004999999999999999,
"min": 0.004999999999999999,
"max": 0.004999999999999999,
"count": 1754
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.004999999999999999,
"min": 0.004999999999999999,
"max": 0.004999999999999999,
"count": 1754
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1694066726",
"python_version": "3.9.17 (main, Jul 5 2023, 20:41:20) \n[GCC 11.2.0]",
"command_line_arguments": "/home/acm/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./trained-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu113",
"numpy_version": "1.21.2",
"end_time_seconds": "1694104736"
},
"total": 38009.418713622,
"count": 1,
"self": 0.16675795900664525,
"children": {
"run_training.setup": {
"total": 0.0044328529984341,
"count": 1,
"self": 0.0044328529984341
},
"TrainerController.start_learning": {
"total": 38009.247522809994,
"count": 1,
"self": 16.779527878439694,
"children": {
"TrainerController._reset_env": {
"total": 12.369896524967771,
"count": 192,
"self": 12.369896524967771
},
"TrainerController.advance": {
"total": 37977.201706746586,
"count": 2497504,
"self": 17.63627195636218,
"children": {
"env_step": {
"total": 25689.303988712014,
"count": 2497504,
"self": 20316.80456168284,
"children": {
"SubprocessEnvManager._take_step": {
"total": 5361.335743209242,
"count": 2497504,
"self": 139.39434876448286,
"children": {
"TorchPolicy.evaluate": {
"total": 5221.941394444759,
"count": 4954172,
"self": 5221.941394444759
}
}
},
"workers": {
"total": 11.163683819933794,
"count": 2497504,
"self": 0.0,
"children": {
"worker_root": {
"total": 37940.311905019604,
"count": 2497504,
"is_parallel": true,
"self": 20382.960668171065,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018097739994118456,
"count": 2,
"is_parallel": true,
"self": 0.0003727360090124421,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014370379903994035,
"count": 8,
"is_parallel": true,
"self": 0.0014370379903994035
}
}
},
"UnityEnvironment.step": {
"total": 0.013925006998761091,
"count": 1,
"is_parallel": true,
"self": 0.0004372239964141045,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003484500011836644,
"count": 1,
"is_parallel": true,
"self": 0.0003484500011836644
},
"communicator.exchange": {
"total": 0.011721148999640718,
"count": 1,
"is_parallel": true,
"self": 0.011721148999640718
},
"steps_from_proto": {
"total": 0.0014181840015226044,
"count": 2,
"is_parallel": true,
"self": 0.00026026300474768505,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011579209967749193,
"count": 8,
"is_parallel": true,
"self": 0.0011579209967749193
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.26497412007302046,
"count": 382,
"is_parallel": true,
"self": 0.04817248907056637,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.2168016310024541,
"count": 1528,
"is_parallel": true,
"self": 0.2168016310024541
}
}
},
"UnityEnvironment.step": {
"total": 17557.086262728466,
"count": 2497503,
"is_parallel": true,
"self": 1040.7441306267792,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 696.1562557399302,
"count": 2497503,
"is_parallel": true,
"self": 696.1562557399302
},
"communicator.exchange": {
"total": 12537.580525180903,
"count": 2497503,
"is_parallel": true,
"self": 12537.580525180903
},
"steps_from_proto": {
"total": 3282.605351180853,
"count": 4995006,
"is_parallel": true,
"self": 520.684407304172,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2761.920943876681,
"count": 19980024,
"is_parallel": true,
"self": 2761.920943876681
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 12270.26144607821,
"count": 2497504,
"self": 223.05618517052426,
"children": {
"process_trajectory": {
"total": 1873.0487656927762,
"count": 2497504,
"self": 1659.739481237826,
"children": {
"RLTrainer._checkpoint": {
"total": 213.3092844549501,
"count": 77,
"self": 213.3092844549501
}
}
},
"_update_policy": {
"total": 10174.15649521491,
"count": 1754,
"self": 4030.5775346320224,
"children": {
"TorchPOCAOptimizer.update": {
"total": 6143.578960582887,
"count": 17540,
"self": 6143.578960582887
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.00003045797348e-07,
"count": 1,
"self": 4.00003045797348e-07
},
"TrainerController._save_models": {
"total": 2.896391259993834,
"count": 1,
"self": 0.13090770099370275,
"children": {
"RLTrainer._checkpoint": {
"total": 2.765483559000131,
"count": 1,
"self": 2.765483559000131
}
}
}
}
}
}
}