{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.162141799926758,
"min": 3.1400129795074463,
"max": 3.295738697052002,
"count": 50
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 34201.7265625,
"min": 19557.10546875,
"max": 105463.640625,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 984.2,
"min": 465.3636363636364,
"max": 999.0,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19684.0,
"min": 7992.0,
"max": 26776.0,
"count": 50
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1205.1040160627358,
"min": 1197.3876853609286,
"max": 1205.1040160627358,
"count": 40
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2410.2080321254716,
"min": 2394.775370721857,
"max": 14396.067827112633,
"count": 40
},
"SoccerTwos.Step.mean": {
"value": 499772.0,
"min": 9004.0,
"max": 499772.0,
"count": 50
},
"SoccerTwos.Step.sum": {
"value": 499772.0,
"min": 9004.0,
"max": 499772.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.004239015746861696,
"min": -0.006967315915971994,
"max": 0.035074543207883835,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.04239015653729439,
"min": -0.15311077237129211,
"max": 0.5261181592941284,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.0037288989406079054,
"min": -0.0060204630717635155,
"max": 0.03507835045456886,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.03728898987174034,
"min": -0.11875440180301666,
"max": 0.5261498689651489,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.2,
"min": -0.3410133322079976,
"max": 0.29264615590755755,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -2.0,
"min": -5.115199983119965,
"max": 3.8044000267982483,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.2,
"min": -0.3410133322079976,
"max": 0.29264615590755755,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -2.0,
"min": -5.115199983119965,
"max": 3.8044000267982483,
"count": 50
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015634879852101827,
"min": 0.012288765522680479,
"max": 0.0204177543365707,
"count": 23
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015634879852101827,
"min": 0.012288765522680479,
"max": 0.0204177543365707,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0016059685226840277,
"min": 4.978458052088778e-06,
"max": 0.0042685117262105145,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0016059685226840277,
"min": 4.978458052088778e-06,
"max": 0.0042685117262105145,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0016188689352323613,
"min": 4.727022671128604e-06,
"max": 0.004330196542044481,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0016188689352323613,
"min": 4.727022671128604e-06,
"max": 0.004330196542044481,
"count": 23
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 23
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 23
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 23
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 23
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676554629",
"python_version": "3.9.16 (main, Jan 11 2023, 16:16:36) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\mnavas\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1676557471"
},
"total": 2842.8831073,
"count": 1,
"self": 0.5995425999999497,
"children": {
"run_training.setup": {
"total": 0.17743529999999996,
"count": 1,
"self": 0.17743529999999996
},
"TrainerController.start_learning": {
"total": 2842.1061294,
"count": 1,
"self": 1.4527519999878677,
"children": {
"TrainerController._reset_env": {
"total": 9.399107499999898,
"count": 3,
"self": 9.399107499999898
},
"TrainerController.advance": {
"total": 2831.0128665000125,
"count": 32681,
"self": 1.6907152000262613,
"children": {
"env_step": {
"total": 1322.226395700006,
"count": 32681,
"self": 1057.5326007000126,
"children": {
"SubprocessEnvManager._take_step": {
"total": 263.6814277999666,
"count": 32681,
"self": 9.69075990004535,
"children": {
"TorchPolicy.evaluate": {
"total": 253.99066789992128,
"count": 64900,
"self": 253.99066789992128
}
}
},
"workers": {
"total": 1.012367200026695,
"count": 32681,
"self": 0.0,
"children": {
"worker_root": {
"total": 2829.199310699984,
"count": 32681,
"is_parallel": true,
"self": 1969.5009470999762,
"children": {
"steps_from_proto": {
"total": 0.0080111999997996,
"count": 6,
"is_parallel": true,
"self": 0.001713299999936524,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.006297899999863077,
"count": 24,
"is_parallel": true,
"self": 0.006297899999863077
}
}
},
"UnityEnvironment.step": {
"total": 859.6903524000078,
"count": 32681,
"is_parallel": true,
"self": 44.88515310002151,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 35.62281369999646,
"count": 32681,
"is_parallel": true,
"self": 35.62281369999646
},
"communicator.exchange": {
"total": 631.1723319999983,
"count": 32681,
"is_parallel": true,
"self": 631.1723319999983
},
"steps_from_proto": {
"total": 148.01005359999155,
"count": 65362,
"is_parallel": true,
"self": 28.905948100060698,
"children": {
"_process_rank_one_or_two_observation": {
"total": 119.10410549993085,
"count": 261448,
"is_parallel": true,
"self": 119.10410549993085
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1507.0957555999803,
"count": 32681,
"self": 9.194271399996296,
"children": {
"process_trajectory": {
"total": 216.2232937999846,
"count": 32681,
"self": 215.78205509998438,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4412387000002127,
"count": 1,
"self": 0.4412387000002127
}
}
},
"_update_policy": {
"total": 1281.6781903999995,
"count": 23,
"self": 134.79763039999943,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1146.88056,
"count": 696,
"self": 1146.88056
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.500000053056283e-06,
"count": 1,
"self": 1.500000053056283e-06
},
"TrainerController._save_models": {
"total": 0.24140189999980066,
"count": 1,
"self": 0.003160699999625649,
"children": {
"RLTrainer._checkpoint": {
"total": 0.238241200000175,
"count": 1,
"self": 0.238241200000175
}
}
}
}
}
}
}