{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 1.357200026512146,
"min": 1.3571773767471313,
"max": 1.4276233911514282,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 8415.9970703125,
"min": 6634.6884765625,
"max": 9930.4248046875,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.mean": {
"value": 0.9393939393939394,
"min": 0.4166666666666667,
"max": 1.0,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.sum": {
"value": 62.0,
"min": 10.0,
"max": 130.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
"value": 9.848484848484848,
"min": 6.095238095238095,
"max": 44.766666666666666,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
"value": 650.0,
"min": 128.0,
"max": 1550.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
"value": 0.06060606060606061,
"min": 0.0,
"max": 0.4583333333333333,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
"value": 4.0,
"min": 0.0,
"max": 11.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
"value": 57.68125317313454,
"min": 13.850637122436806,
"max": 69.52353069829006,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
"value": 3806.96270942688,
"min": 353.395546913147,
"max": 6487.021728515625,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
"value": 3.5693615932356226,
"min": 1.2718986133734385,
"max": 6.352038798508821,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
"value": 235.5778651535511,
"min": 34.720767974853516,
"max": 582.2078572884202,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
"value": 1.623553609306162,
"min": 0.1814257820447286,
"max": 2.4614930526882994,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
"value": 107.1545382142067,
"min": 5.442773461341858,
"max": 189.75158336758614,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
"value": 0.16235536088546118,
"min": 0.018142578254143397,
"max": 0.24614930488899642,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
"value": 10.715453818440437,
"min": 0.5442773476243019,
"max": 18.975158277899027,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
"value": 71.0193664377386,
"min": 22.019399881362915,
"max": 78.46564558077426,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
"value": 4687.278184890747,
"min": 528.46559715271,
"max": 9282.112834453583,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 93.0,
"min": 43.02564102564103,
"max": 305.42857142857144,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 6138.0,
"min": 4806.0,
"max": 7059.0,
"count": 200
},
"Agent.Step.mean": {
"value": 1199972.0,
"min": 5673.0,
"max": 1199972.0,
"count": 200
},
"Agent.Step.sum": {
"value": 1199972.0,
"min": 5673.0,
"max": 1199972.0,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.mean": {
"value": 0.22229573130607605,
"min": 0.013134981505572796,
"max": 0.9923661947250366,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.sum": {
"value": 14.671518325805664,
"min": 0.39404943585395813,
"max": 35.86885452270508,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 48.32444381713867,
"min": 0.05967150628566742,
"max": 56.38117599487305,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 3189.413330078125,
"min": 1.4917876720428467,
"max": 6985.7470703125,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 101.05303009892955,
"min": 45.02783600866795,
"max": 106.22177314758301,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 6669.49998652935,
"min": 1062.1014396548271,
"max": 13739.857434153557,
"count": 200
},
"Agent.Policy.CuriosityReward.mean": {
"value": 0.1938589338043874,
"min": 0.0,
"max": 6.21684675505667,
"count": 200
},
"Agent.Policy.CuriosityReward.sum": {
"value": 12.794689631089568,
"min": 0.0,
"max": 205.15594291687012,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 90.94773094401215,
"min": 40.525054353922606,
"max": 95.59959813526699,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 6002.550242304802,
"min": 955.8913725614548,
"max": 12365.871996223927,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.02042245038319379,
"min": 0.014539667773836603,
"max": 0.035358245174090065,
"count": 137
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.02042245038319379,
"min": 0.014539667773836603,
"max": 0.035358245174090065,
"count": 137
},
"Agent.Losses.ValueLoss.mean": {
"value": 64.42997614542644,
"min": 30.159402052561443,
"max": 149.21976121266684,
"count": 137
},
"Agent.Losses.ValueLoss.sum": {
"value": 64.42997614542644,
"min": 30.159402052561443,
"max": 149.21976121266684,
"count": 137
},
"Agent.Policy.LearningRate.mean": {
"value": 2.1188492937499913e-06,
"min": 2.1188492937499913e-06,
"max": 0.00029776350074549996,
"count": 137
},
"Agent.Policy.LearningRate.sum": {
"value": 2.1188492937499913e-06,
"min": 2.1188492937499913e-06,
"max": 0.00029776350074549996,
"count": 137
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10070625,
"min": 0.10070625,
"max": 0.1992545,
"count": 137
},
"Agent.Policy.Epsilon.sum": {
"value": 0.10070625,
"min": 0.10070625,
"max": 0.1992545,
"count": 137
},
"Agent.Policy.Beta.mean": {
"value": 4.5241874999999854e-05,
"min": 4.5241874999999854e-05,
"max": 0.00496279955,
"count": 137
},
"Agent.Policy.Beta.sum": {
"value": 4.5241874999999854e-05,
"min": 4.5241874999999854e-05,
"max": 0.00496279955,
"count": 137
},
"Agent.Losses.CuriosityForwardLoss.mean": {
"value": 0.020843641832470894,
"min": 0.017696548253297806,
"max": 0.6002316027879715,
"count": 137
},
"Agent.Losses.CuriosityForwardLoss.sum": {
"value": 0.020843641832470894,
"min": 0.017696548253297806,
"max": 0.6002316027879715,
"count": 137
},
"Agent.Losses.CuriosityInverseLoss.mean": {
"value": 2.358081857363383,
"min": 2.195520669221878,
"max": 3.353024572134018,
"count": 137
},
"Agent.Losses.CuriosityInverseLoss.sum": {
"value": 2.358081857363383,
"min": 2.195520669221878,
"max": 3.353024572134018,
"count": 137
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717613747",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_4_task_3_run_id_2_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_3_run_id_2_train --base-port 5007",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.0",
"end_time_seconds": "1717618844"
},
"total": 5097.187284,
"count": 1,
"self": 0.6515434999992067,
"children": {
"run_training.setup": {
"total": 0.05466150000000003,
"count": 1,
"self": 0.05466150000000003
},
"TrainerController.start_learning": {
"total": 5096.481079,
"count": 1,
"self": 9.771768799952042,
"children": {
"TrainerController._reset_env": {
"total": 2.23501,
"count": 1,
"self": 2.23501
},
"TrainerController.advance": {
"total": 5084.234055200048,
"count": 403389,
"self": 9.597515900173676,
"children": {
"env_step": {
"total": 5074.636539299874,
"count": 403389,
"self": 2543.8344997997788,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2524.6346623000245,
"count": 403389,
"self": 16.688021499901424,
"children": {
"TorchPolicy.evaluate": {
"total": 2507.946640800123,
"count": 400075,
"self": 2507.946640800123
}
}
},
"workers": {
"total": 6.167377200070633,
"count": 403389,
"self": 0.0,
"children": {
"worker_root": {
"total": 5085.677875099918,
"count": 403389,
"is_parallel": true,
"self": 2862.7683938999307,
"children": {
"steps_from_proto": {
"total": 0.008082800000000168,
"count": 1,
"is_parallel": true,
"self": 0.00018680000000026453,
"children": {
"_process_maybe_compressed_observation": {
"total": 0.0078096999999999195,
"count": 2,
"is_parallel": true,
"self": 4.7499999999978115e-05,
"children": {
"_observation_to_np_array": {
"total": 0.007762199999999941,
"count": 3,
"is_parallel": true,
"self": 3.929999999985334e-05,
"children": {
"process_pixels": {
"total": 0.007722900000000088,
"count": 3,
"is_parallel": true,
"self": 0.00026540000000019326,
"children": {
"image_decompress": {
"total": 0.007457499999999895,
"count": 3,
"is_parallel": true,
"self": 0.007457499999999895
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 8.629999999998361e-05,
"count": 2,
"is_parallel": true,
"self": 8.629999999998361e-05
}
}
},
"UnityEnvironment.step": {
"total": 2222.9013983999876,
"count": 403389,
"is_parallel": true,
"self": 26.0621392999783,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.664956999852585,
"count": 403389,
"is_parallel": true,
"self": 26.664956999852585
},
"communicator.exchange": {
"total": 1956.293733199997,
"count": 403389,
"is_parallel": true,
"self": 1956.293733199997
},
"steps_from_proto": {
"total": 213.8805689001597,
"count": 403389,
"is_parallel": true,
"self": 43.15751690001238,
"children": {
"_process_maybe_compressed_observation": {
"total": 153.17746049990848,
"count": 806778,
"is_parallel": true,
"self": 11.759790399618026,
"children": {
"_observation_to_np_array": {
"total": 141.41767010029045,
"count": 1210827,
"is_parallel": true,
"self": 11.159318700508095,
"children": {
"process_pixels": {
"total": 130.25835139978236,
"count": 1210827,
"is_parallel": true,
"self": 60.515507799460096,
"children": {
"image_decompress": {
"total": 69.74284360032226,
"count": 1210827,
"is_parallel": true,
"self": 69.74284360032226
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 17.545591500238817,
"count": 806778,
"is_parallel": true,
"self": 17.545591500238817
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.440000000409782e-05,
"count": 1,
"self": 3.440000000409782e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 5090.927058500024,
"count": 253109,
"is_parallel": true,
"self": 8.816564300191203,
"children": {
"process_trajectory": {
"total": 4196.737856099834,
"count": 253109,
"is_parallel": true,
"self": 4196.175482499833,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5623736000002282,
"count": 2,
"is_parallel": true,
"self": 0.5623736000002282
}
}
},
"_update_policy": {
"total": 885.3726380999992,
"count": 137,
"is_parallel": true,
"self": 598.9413355999843,
"children": {
"TorchPPOOptimizer.update": {
"total": 286.4313025000149,
"count": 3348,
"is_parallel": true,
"self": 286.4313025000149
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.24021059999995487,
"count": 1,
"self": 0.007300899999791,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23290970000016387,
"count": 1,
"self": 0.23290970000016387
}
}
}
}
}
}
}