{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.9886515140533447,
"min": 2.8776695728302,
"max": 3.2957441806793213,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 77083.296875,
"min": 21003.193359375,
"max": 143326.15625,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 348.14285714285717,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 13740.0,
"max": 28680.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1164.848985779112,
"min": 1164.5178833728528,
"max": 1199.6461445698528,
"count": 253
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2329.697971558224,
"min": 2329.678079995595,
"max": 21226.461981190987,
"count": 253
},
"SoccerTwos.Step.mean": {
"value": 4999488.0,
"min": 9358.0,
"max": 4999488.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999488.0,
"min": 9358.0,
"max": 4999488.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.0003431244404055178,
"min": -0.028341375291347504,
"max": 0.0887782946228981,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.0034312442876398563,
"min": -0.42512062191963196,
"max": 1.1541178226470947,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0003485643828753382,
"min": -0.028018254786729813,
"max": 0.0887521505355835,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.003485643770545721,
"min": -0.4202738106250763,
"max": 1.1537779569625854,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.631578947368421,
"max": 0.44071110751893783,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -12.0,
"max": 7.932799935340881,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.631578947368421,
"max": 0.44071110751893783,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -12.0,
"max": 7.932799935340881,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.0167025576151597,
"min": 0.01178024789793805,
"max": 0.023278041661251338,
"count": 231
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.0167025576151597,
"min": 0.01178024789793805,
"max": 0.023278041661251338,
"count": 231
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 1.2005250620283903e-06,
"min": 6.482456615941601e-09,
"max": 0.007855950311447183,
"count": 231
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 1.2005250620283903e-06,
"min": 6.482456615941601e-09,
"max": 0.007855950311447183,
"count": 231
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 1.2068932861393952e-06,
"min": 7.715949938239911e-09,
"max": 0.007875663150722781,
"count": 231
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 1.2068932861393952e-06,
"min": 7.715949938239911e-09,
"max": 0.007875663150722781,
"count": 231
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 231
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 231
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 231
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 231
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 231
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 231
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1707287606",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\ramsi\\anaconda3\\envs\\hfrl\\Scripts\\mlagents-learn C:\\Users\\ramsi\\OneDrive\\Documents\\Github Projects\\reinforcement-learning\\HuggingFace-RLCourse\\ml-agents\\config\\poca\\SoccerTwos.yaml --env=C:\\Users\\ramsi\\OneDrive\\Documents\\Github Projects\\reinforcement-learning\\HuggingFace-RLCourse\\ml-agents\\training-envs-executables\\SoccerTwos\\SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1707296319"
},
"total": 8713.064709100057,
"count": 1,
"self": 2.027184100006707,
"children": {
"run_training.setup": {
"total": 0.09109230001922697,
"count": 1,
"self": 0.09109230001922697
},
"TrainerController.start_learning": {
"total": 8710.946432700031,
"count": 1,
"self": 5.462006586720236,
"children": {
"TrainerController._reset_env": {
"total": 6.314539300277829,
"count": 25,
"self": 6.314539300277829
},
"TrainerController.advance": {
"total": 8699.040651713032,
"count": 326185,
"self": 5.753925517317839,
"children": {
"env_step": {
"total": 4144.400120237493,
"count": 326185,
"self": 3197.684009380988,
"children": {
"SubprocessEnvManager._take_step": {
"total": 943.1199036971666,
"count": 326185,
"self": 35.2558862764854,
"children": {
"TorchPolicy.evaluate": {
"total": 907.8640174206812,
"count": 647666,
"self": 907.8640174206812
}
}
},
"workers": {
"total": 3.596207159338519,
"count": 326185,
"self": 0.0,
"children": {
"worker_root": {
"total": 8698.748673381284,
"count": 326185,
"is_parallel": true,
"self": 6164.275183456251,
"children": {
"steps_from_proto": {
"total": 0.03010450000874698,
"count": 50,
"is_parallel": true,
"self": 0.006091700633987784,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.024012799374759197,
"count": 200,
"is_parallel": true,
"self": 0.024012799374759197
}
}
},
"UnityEnvironment.step": {
"total": 2534.4433854250237,
"count": 326185,
"is_parallel": true,
"self": 118.31500989513006,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 86.19965206331108,
"count": 326185,
"is_parallel": true,
"self": 86.19965206331108
},
"communicator.exchange": {
"total": 1954.621456268127,
"count": 326185,
"is_parallel": true,
"self": 1954.621456268127
},
"steps_from_proto": {
"total": 375.3072671984555,
"count": 652370,
"is_parallel": true,
"self": 74.32913937082049,
"children": {
"_process_rank_one_or_two_observation": {
"total": 300.97812782763503,
"count": 2609480,
"is_parallel": true,
"self": 300.97812782763503
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 4548.8866059582215,
"count": 326185,
"self": 44.64638174988795,
"children": {
"process_trajectory": {
"total": 624.5394466089783,
"count": 326185,
"self": 623.4968685091008,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0425780998775735,
"count": 10,
"self": 1.0425780998775735
}
}
},
"_update_policy": {
"total": 3879.700777599355,
"count": 231,
"self": 513.9343299011234,
"children": {
"TorchPOCAOptimizer.update": {
"total": 3365.766447698232,
"count": 6930,
"self": 3365.766447698232
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.00006853044033e-07,
"count": 1,
"self": 9.00006853044033e-07
},
"TrainerController._save_models": {
"total": 0.12923419999424368,
"count": 1,
"self": 0.013538599945604801,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11569560004863888,
"count": 1,
"self": 0.11569560004863888
}
}
}
}
}
}
}