ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4060379266738892,
"min": 1.4060379266738892,
"max": 1.4279805421829224,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69833.6875,
"min": 69319.546875,
"max": 76673.0625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 74.13383458646616,
"min": 71.88483965014578,
"max": 402.68,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49299.0,
"min": 49216.0,
"max": 50335.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999969.0,
"min": 49762.0,
"max": 1999969.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999969.0,
"min": 49762.0,
"max": 1999969.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.5523173809051514,
"min": 0.13029798865318298,
"max": 2.5523173809051514,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1697.291015625,
"min": 16.156949996948242,
"max": 1714.25634765625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 4.074969700942362,
"min": 1.6635534407631043,
"max": 4.081191302381609,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2709.854851126671,
"min": 206.28062665462494,
"max": 2716.735749900341,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 4.074969700942362,
"min": 1.6635534407631043,
"max": 4.081191302381609,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2709.854851126671,
"min": 206.28062665462494,
"max": 2716.735749900341,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014249569371006347,
"min": 0.013970194883828904,
"max": 0.019142484311952205,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04274870811301904,
"min": 0.02794038976765781,
"max": 0.05742745293585662,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05880469779173533,
"min": 0.021581099492808183,
"max": 0.06738798171281815,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.176414093375206,
"min": 0.043162198985616365,
"max": 0.1809960118184487,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.883898705400004e-06,
"min": 3.883898705400004e-06,
"max": 0.00029530657656447494,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1651696116200011e-05,
"min": 1.1651696116200011e-05,
"max": 0.00084402136865955,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1012946,
"min": 0.1012946,
"max": 0.19843552500000006,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3038838,
"min": 0.20773195000000008,
"max": 0.58134045,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.460054000000005e-05,
"min": 7.460054000000005e-05,
"max": 0.004921932697500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022380162000000016,
"min": 0.00022380162000000016,
"max": 0.014068888454999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739464854",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739467364"
},
"total": 2509.9530628360003,
"count": 1,
"self": 0.4470701699997335,
"children": {
"run_training.setup": {
"total": 0.02273613600027602,
"count": 1,
"self": 0.02273613600027602
},
"TrainerController.start_learning": {
"total": 2509.4832565300003,
"count": 1,
"self": 4.6990519549485725,
"children": {
"TrainerController._reset_env": {
"total": 2.98906844000021,
"count": 1,
"self": 2.98906844000021
},
"TrainerController.advance": {
"total": 2501.6842862890526,
"count": 233219,
"self": 4.989908688264222,
"children": {
"env_step": {
"total": 2017.1294193010913,
"count": 233219,
"self": 1584.9659377077942,
"children": {
"SubprocessEnvManager._take_step": {
"total": 429.41270590808244,
"count": 233219,
"self": 15.992108361060218,
"children": {
"TorchPolicy.evaluate": {
"total": 413.4205975470222,
"count": 222889,
"self": 413.4205975470222
}
}
},
"workers": {
"total": 2.7507756852146485,
"count": 233219,
"self": 0.0,
"children": {
"worker_root": {
"total": 2501.762735142152,
"count": 233219,
"is_parallel": true,
"self": 1211.283076121204,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010434669998176105,
"count": 1,
"is_parallel": true,
"self": 0.00038260299970716005,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006608640001104504,
"count": 2,
"is_parallel": true,
"self": 0.0006608640001104504
}
}
},
"UnityEnvironment.step": {
"total": 0.029393911000170192,
"count": 1,
"is_parallel": true,
"self": 0.0003392880003048049,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019809300010820152,
"count": 1,
"is_parallel": true,
"self": 0.00019809300010820152
},
"communicator.exchange": {
"total": 0.02813811399983024,
"count": 1,
"is_parallel": true,
"self": 0.02813811399983024
},
"steps_from_proto": {
"total": 0.0007184159999269468,
"count": 1,
"is_parallel": true,
"self": 0.0002002379997065873,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005181780002203595,
"count": 2,
"is_parallel": true,
"self": 0.0005181780002203595
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1290.479659020948,
"count": 233218,
"is_parallel": true,
"self": 38.39814206402434,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.97188431295444,
"count": 233218,
"is_parallel": true,
"self": 83.97188431295444
},
"communicator.exchange": {
"total": 1077.799530979922,
"count": 233218,
"is_parallel": true,
"self": 1077.799530979922
},
"steps_from_proto": {
"total": 90.31010166404712,
"count": 233218,
"is_parallel": true,
"self": 33.90309408270832,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.4070075813388,
"count": 466436,
"is_parallel": true,
"self": 56.4070075813388
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 479.5649582996971,
"count": 233219,
"self": 6.791844096644127,
"children": {
"process_trajectory": {
"total": 178.05205400205432,
"count": 233219,
"self": 176.79672477705344,
"children": {
"RLTrainer._checkpoint": {
"total": 1.255329225000878,
"count": 10,
"self": 1.255329225000878
}
}
},
"_update_policy": {
"total": 294.72106020099864,
"count": 97,
"self": 231.4608522080066,
"children": {
"TorchPPOOptimizer.update": {
"total": 63.26020799299204,
"count": 2910,
"self": 63.26020799299204
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.709992809803225e-07,
"count": 1,
"self": 9.709992809803225e-07
},
"TrainerController._save_models": {
"total": 0.1108488749996468,
"count": 1,
"self": 0.0018239279988847557,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10902494700076204,
"count": 1,
"self": 0.10902494700076204
}
}
}
}
}
}
}
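
The gauges above follow the ML-Agents summary format (each entry tracks a value plus its min, max, and count over the recorded summaries), and the timer data is a recursive tree of total / count / self / children nodes measured in seconds. Below is a minimal, unofficial sketch of how such a file could be inspected with the Python standard library; the local path run_logs/timers.json is an assumption for illustration, not something specified by this repository.

import json

# Load the ML-Agents timer/gauge dump produced during the Huggy PPO run.
# NOTE: the path below is an assumed local location, adjust as needed.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records a value plus min/max over `count` recorded summaries.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# The timer tree is recursive: every node has `total` (seconds), `count`,
# `self` (time not attributed to children), and an optional `children` dict.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: total={node.get('total', 0.0):.2f}s "
          f"self={node.get('self', 0.0):.2f}s count={node.get('count', 0)}")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)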