{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.14276349544525146,
"min": 0.13769082725048065,
"max": 1.4572417736053467,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4319.4521484375,
"min": 4143.943359375,
"max": 44206.88671875,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999979.0,
"min": 29952.0,
"max": 2999979.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999979.0,
"min": 29952.0,
"max": 2999979.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7880120277404785,
"min": -0.13937515020370483,
"max": 0.884117603302002,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 236.4036102294922,
"min": -33.03190994262695,
"max": 270.53997802734375,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.01058814488351345,
"min": -0.01058814488351345,
"max": 0.38161441683769226,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -3.176443576812744,
"min": -3.176443576812744,
"max": 90.44261932373047,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06460294349970007,
"min": 0.06460294349970007,
"max": 0.07245850729980573,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.904441208995801,
"min": 0.4809648404198955,
"max": 1.0805650154692485,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015231969922232176,
"min": 0.0010879355447108963,
"max": 0.01642314226041682,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21324757891125046,
"min": 0.014143162081241653,
"max": 0.2463471339062523,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.4421280907523867e-06,
"min": 1.4421280907523867e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.0189793270533415e-05,
"min": 2.0189793270533415e-05,
"max": 0.004052848449050532,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1004806761904762,
"min": 0.1004806761904762,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4067294666666668,
"min": 1.3962282666666668,
"max": 2.8275573333333335,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.801955142857163e-05,
"min": 5.801955142857163e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008122737200000028,
"min": 0.0008122737200000028,
"max": 0.13509985172,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0068816328421235085,
"min": 0.0068551101721823215,
"max": 0.5026339292526245,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.09634286165237427,
"min": 0.09597153961658478,
"max": 3.518437623977661,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 229.82835820895522,
"min": 203.53571428571428,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30797.0,
"min": 15984.0,
"max": 33616.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7403134184319582,
"min": -1.0000000521540642,
"max": 1.796464268969638,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 233.2019980698824,
"min": -27.647201739251614,
"max": 258.2169983237982,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7403134184319582,
"min": -1.0000000521540642,
"max": 1.796464268969638,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 233.2019980698824,
"min": -27.647201739251614,
"max": 258.2169983237982,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.016465745684997014,
"min": 0.014973602063745213,
"max": 10.631956234574318,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.2064099217896,
"min": 2.088437120008166,
"max": 170.1112997531891,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680810011",
"python_version": "3.10.5 | packaged by conda-forge | (main, Jun 14 2022, 07:04:59) [GCC 10.3.0]",
"command_line_arguments": "ml-agents/mlagents/trainers/learn.py ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680816995"
},
"total": 6983.470412672,
"count": 1,
"self": 0.49378166499991494,
"children": {
"run_training.setup": {
"total": 0.01997928400010096,
"count": 1,
"self": 0.01997928400010096
},
"TrainerController.start_learning": {
"total": 6982.956651723,
"count": 1,
"self": 4.835357799255689,
"children": {
"TrainerController._reset_env": {
"total": 3.3798891459998686,
"count": 1,
"self": 3.3798891459998686
},
"TrainerController.advance": {
"total": 6974.634803342744,
"count": 195694,
"self": 4.595577090563893,
"children": {
"env_step": {
"total": 5169.338238900851,
"count": 195694,
"self": 4856.954858979969,
"children": {
"SubprocessEnvManager._take_step": {
"total": 309.48048991081714,
"count": 195694,
"self": 13.959444717980205,
"children": {
"TorchPolicy.evaluate": {
"total": 295.52104519283694,
"count": 187562,
"self": 295.52104519283694
}
}
},
"workers": {
"total": 2.902890010065221,
"count": 195694,
"self": 0.0,
"children": {
"worker_root": {
"total": 6962.905718054875,
"count": 195694,
"is_parallel": true,
"self": 2467.113556077011,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018009939999501512,
"count": 1,
"is_parallel": true,
"self": 0.0005803920000744256,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012206019998757256,
"count": 8,
"is_parallel": true,
"self": 0.0012206019998757256
}
}
},
"UnityEnvironment.step": {
"total": 0.08273205199998301,
"count": 1,
"is_parallel": true,
"self": 0.0005607649998182751,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005139909999343217,
"count": 1,
"is_parallel": true,
"self": 0.0005139909999343217
},
"communicator.exchange": {
"total": 0.07560907800007044,
"count": 1,
"is_parallel": true,
"self": 0.07560907800007044
},
"steps_from_proto": {
"total": 0.006048218000159977,
"count": 1,
"is_parallel": true,
"self": 0.00040821899983711774,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0056399990003228595,
"count": 8,
"is_parallel": true,
"self": 0.0056399990003228595
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4495.792161977864,
"count": 195693,
"is_parallel": true,
"self": 99.70346102174699,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.05140697701268,
"count": 195693,
"is_parallel": true,
"self": 75.05140697701268
},
"communicator.exchange": {
"total": 4017.900902720913,
"count": 195693,
"is_parallel": true,
"self": 4017.900902720913
},
"steps_from_proto": {
"total": 303.1363912581919,
"count": 195693,
"is_parallel": true,
"self": 68.8161426400427,
"children": {
"_process_rank_one_or_two_observation": {
"total": 234.3202486181492,
"count": 1565544,
"is_parallel": true,
"self": 234.3202486181492
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1800.7009873513293,
"count": 195694,
"self": 8.696883420345102,
"children": {
"process_trajectory": {
"total": 332.2479694359938,
"count": 195694,
"self": 331.58103084599384,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6669385899999725,
"count": 6,
"self": 0.6669385899999725
}
}
},
"_update_policy": {
"total": 1459.7561344949904,
"count": 1406,
"self": 974.2329162310107,
"children": {
"TorchPPOOptimizer.update": {
"total": 485.52321826397974,
"count": 68394,
"self": 485.52321826397974
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.930001058615744e-07,
"count": 1,
"self": 8.930001058615744e-07
},
"TrainerController._save_models": {
"total": 0.1066005419997964,
"count": 1,
"self": 0.0015894160005700542,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10501112599922635,
"count": 1,
"self": 0.10501112599922635
}
}
}
}
}
}
}