ppo-Pyramids/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 1.2891631126403809,
"min": 1.2891631126403809,
"max": 1.4663798809051514,
"count": 7
},
"Pyramids.Policy.Entropy.sum": {
"value": 2640.2060546875,
"min": 2640.2060546875,
"max": 3003.14599609375,
"count": 7
},
"Pyramids.Step.mean": {
"value": 29952.0,
"min": 16896.0,
"max": 29952.0,
"count": 14
},
"Pyramids.Step.sum": {
"value": 29952.0,
"min": 16896.0,
"max": 29952.0,
"count": 14
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.2295531928539276,
"min": -0.31196343898773193,
"max": -0.2295531928539276,
"count": 14
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -1.836425542831421,
"min": -2.4957075119018555,
"max": -1.7371174097061157,
"count": 14
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.3984936475753784,
"min": 0.3984936475753784,
"max": 0.4315425157546997,
"count": 14
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.1879491806030273,
"min": 2.8811514377593994,
"max": 3.4523401260375977,
"count": 14
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 14
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 14
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0634305022249464,
"min": 0.0634305022249464,
"max": 0.07502602372551337,
"count": 3
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.0634305022249464,
"min": 0.0634305022249464,
"max": 0.07502602372551337,
"count": 3
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0008144267042856276,
"min": 0.0008144267042856276,
"max": 0.0011114171751008446,
"count": 3
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.0008144267042856276,
"min": 0.0008144267042856276,
"max": 0.0011114171751008446,
"count": 3
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.7120094293333343e-05,
"min": 1.7120094293333343e-05,
"max": 9.904006698666665e-05,
"count": 3
},
"Pyramids.Policy.LearningRate.sum": {
"value": 1.7120094293333343e-05,
"min": 1.7120094293333343e-05,
"max": 9.904006698666665e-05,
"count": 3
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10570666666666663,
"min": 0.10570666666666663,
"max": 0.13301333333333337,
"count": 3
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.10570666666666663,
"min": 0.10570666666666663,
"max": 0.13301333333333337,
"count": 3
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0005800960000000005,
"min": 0.0005800960000000005,
"max": 0.0033080320000000002,
"count": 3
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0005800960000000005,
"min": 0.0005800960000000005,
"max": 0.0033080320000000002,
"count": 3
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.18655969202518463,
"min": 0.18655969202518463,
"max": 0.2545681893825531,
"count": 3
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18655969202518463,
"min": 0.18655969202518463,
"max": 0.2545681893825531,
"count": 3
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1732748026",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1732748056"
},
"total": 29.80839726499994,
"count": 1,
"self": 0.5302850309998348,
"children": {
"run_training.setup": {
"total": 0.06005067700004929,
"count": 1,
"self": 0.06005067700004929
},
"TrainerController.start_learning": {
"total": 29.218061557000055,
"count": 1,
"self": 0.017323179999266358,
"children": {
"TrainerController._reset_env": {
"total": 2.231318865999924,
"count": 1,
"self": 2.231318865999924
},
"TrainerController.advance": {
"total": 26.83372836300066,
"count": 896,
"self": 0.018904911005279246,
"children": {
"env_step": {
"total": 15.817361654996716,
"count": 896,
"self": 13.460097744994982,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2.3466479920012944,
"count": 896,
"self": 0.06717484699902343,
"children": {
"TorchPolicy.evaluate": {
"total": 2.279473145002271,
"count": 896,
"self": 2.279473145002271
}
}
},
"workers": {
"total": 0.010615918000439706,
"count": 896,
"self": 0.0,
"children": {
"worker_root": {
"total": 28.664034037996657,
"count": 896,
"is_parallel": true,
"self": 16.707145228998343,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022039530000483865,
"count": 1,
"is_parallel": true,
"self": 0.0006743939999296344,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001529559000118752,
"count": 8,
"is_parallel": true,
"self": 0.001529559000118752
}
}
},
"UnityEnvironment.step": {
"total": 0.04874987499999861,
"count": 1,
"is_parallel": true,
"self": 0.0006504980000272553,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044003599998632126,
"count": 1,
"is_parallel": true,
"self": 0.00044003599998632126
},
"communicator.exchange": {
"total": 0.045904174000042985,
"count": 1,
"is_parallel": true,
"self": 0.045904174000042985
},
"steps_from_proto": {
"total": 0.0017551669999420483,
"count": 1,
"is_parallel": true,
"self": 0.0004005529999631108,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013546139999789375,
"count": 8,
"is_parallel": true,
"self": 0.0013546139999789375
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 11.956888808998315,
"count": 895,
"is_parallel": true,
"self": 0.4751948009959506,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.3361890320004477,
"count": 895,
"is_parallel": true,
"self": 0.3361890320004477
},
"communicator.exchange": {
"total": 9.699231907998296,
"count": 895,
"is_parallel": true,
"self": 9.699231907998296
},
"steps_from_proto": {
"total": 1.4462730680036202,
"count": 895,
"is_parallel": true,
"self": 0.2883313829998997,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1.1579416850037205,
"count": 7160,
"is_parallel": true,
"self": 1.1579416850037205
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 10.997461796998664,
"count": 896,
"self": 0.022899022002775382,
"children": {
"process_trajectory": {
"total": 2.055285310995828,
"count": 896,
"self": 2.055285310995828
},
"_update_policy": {
"total": 8.91927746400006,
"count": 3,
"self": 4.761904545001244,
"children": {
"TorchPPOOptimizer.update": {
"total": 4.157372918998817,
"count": 288,
"self": 4.157372918998817
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0490000477147987e-06,
"count": 1,
"self": 1.0490000477147987e-06
},
"TrainerController._save_models": {
"total": 0.13569009900015772,
"count": 1,
"self": 0.0022772410000015952,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13341285800015612,
"count": 1,
"self": 0.13341285800015612
}
}
}
}
}
}
}
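
The JSON above is the gauges-plus-timer-tree format that ML-Agents writes at the end of a run: each gauge records the latest value together with its running min/max and a sample count, while the nested "children" entries form a wall-clock profile of the training loop. As a minimal sketch (not part of the original file) of how one might inspect it with the standard library, assuming the file sits at run_logs/timers.json as the path at the top suggests:

import json

# Load the timers file written by mlagents-learn (path is an assumption
# based on the repository layout shown above).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores the last recorded value plus running min/max and the
# number of samples that contributed to it.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# The timer tree nests under "children"; "total" is wall-clock seconds spent
# in a node (children included), "self" is the portion not attributed to
# any child, and "count" is how many times the block was entered.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: total={node.get('total', 0.0):.3f}s "
          f"count={node.get('count', 0)}")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)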