{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6974644660949707,
"min": 1.6479068994522095,
"max": 1.896829605102539,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 35415.8984375,
"min": 28536.375,
"max": 40821.8828125,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 54.78021978021978,
"min": 45.48598130841121,
"max": 93.90566037735849,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19940.0,
"min": 18188.0,
"max": 21376.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1586.4362070420182,
"min": 1540.313808969222,
"max": 1614.8667785937826,
"count": 500
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 288731.3896816473,
"min": 166080.3791611248,
"max": 342651.52787535306,
"count": 500
},
"SoccerTwos.Step.mean": {
"value": 14999834.0,
"min": 10009998.0,
"max": 14999834.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 14999834.0,
"min": 10009998.0,
"max": 14999834.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.09347043931484222,
"min": -0.10996756702661514,
"max": 0.07917854189872742,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -16.918149948120117,
"min": -18.584518432617188,
"max": 14.014601707458496,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0938849151134491,
"min": -0.11217296868562698,
"max": 0.07936329394578934,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -16.9931697845459,
"min": -18.79776382446289,
"max": 13.77039909362793,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.09913370227286829,
"min": -0.3590155045191447,
"max": 0.31450215818213045,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -17.94320011138916,
"min": -51.055600464344025,
"max": 50.304800271987915,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.09913370227286829,
"min": -0.3590155045191447,
"max": 0.31450215818213045,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -17.94320011138916,
"min": -51.055600464344025,
"max": 50.304800271987915,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.013691309865680523,
"min": 0.010207320038656082,
"max": 0.025986198308722428,
"count": 242
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.013691309865680523,
"min": 0.010207320038656082,
"max": 0.025986198308722428,
"count": 242
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11231135825316112,
"min": 0.07683478419979413,
"max": 0.1216113289197286,
"count": 242
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11231135825316112,
"min": 0.07683478419979413,
"max": 0.1216113289197286,
"count": 242
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11509122302134832,
"min": 0.07745104084412256,
"max": 0.12338073030114174,
"count": 242
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11509122302134832,
"min": 0.07745104084412256,
"max": 0.12338073030114174,
"count": 242
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 242
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 242
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 242
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 242
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 242
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 242
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677257660",
"python_version": "3.9.16 (main, Jan 11 2023, 16:16:36) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\leonh\\anaconda3\\envs\\rl_course\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1677275404"
},
"total": 17743.3639587,
"count": 1,
"self": 0.3233462000025611,
"children": {
"run_training.setup": {
"total": 0.10402470000000008,
"count": 1,
"self": 0.10402470000000008
},
"TrainerController.start_learning": {
"total": 17742.9365878,
"count": 1,
"self": 7.322832100213418,
"children": {
"TrainerController._reset_env": {
"total": 3.791751799999756,
"count": 16,
"self": 3.791751799999756
},
"TrainerController.advance": {
"total": 17731.70783189979,
"count": 345148,
"self": 6.805752999469405,
"children": {
"env_step": {
"total": 6258.933678300444,
"count": 345148,
"self": 5078.930546300986,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1175.3040134000476,
"count": 345148,
"self": 39.98794329983343,
"children": {
"TorchPolicy.evaluate": {
"total": 1135.3160701002141,
"count": 626460,
"self": 1135.3160701002141
}
}
},
"workers": {
"total": 4.699118599410042,
"count": 345148,
"self": 0.0,
"children": {
"worker_root": {
"total": 17725.01145240032,
"count": 345148,
"is_parallel": true,
"self": 13473.047958101204,
"children": {
"steps_from_proto": {
"total": 0.029470499995648325,
"count": 32,
"is_parallel": true,
"self": 0.005261399998012006,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.02420909999763632,
"count": 128,
"is_parallel": true,
"self": 0.02420909999763632
}
}
},
"UnityEnvironment.step": {
"total": 4251.934023799118,
"count": 345148,
"is_parallel": true,
"self": 188.7873391995995,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 140.90055439993918,
"count": 345148,
"is_parallel": true,
"self": 140.90055439993918
},
"communicator.exchange": {
"total": 3327.546066999894,
"count": 345148,
"is_parallel": true,
"self": 3327.546066999894
},
"steps_from_proto": {
"total": 594.7000631996857,
"count": 690296,
"is_parallel": true,
"self": 107.05366169907165,
"children": {
"_process_rank_one_or_two_observation": {
"total": 487.64640150061405,
"count": 2761184,
"is_parallel": true,
"self": 487.64640150061405
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 11465.968400599877,
"count": 345148,
"self": 65.8435991999122,
"children": {
"process_trajectory": {
"total": 1772.7973054999507,
"count": 345148,
"self": 1771.5240645999481,
"children": {
"RLTrainer._checkpoint": {
"total": 1.27324090000252,
"count": 10,
"self": 1.27324090000252
}
}
},
"_update_policy": {
"total": 9627.327495900014,
"count": 242,
"self": 777.1713052999894,
"children": {
"TorchPOCAOptimizer.update": {
"total": 8850.156190600024,
"count": 7260,
"self": 8850.156190600024
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1999982234556228e-06,
"count": 1,
"self": 1.1999982234556228e-06
},
"TrainerController._save_models": {
"total": 0.11417079999955604,
"count": 1,
"self": 0.0024827999986882787,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11168800000086776,
"count": 1,
"self": 0.11168800000086776
}
}
}
}
}
}
}