{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 1.4249943494796753,
"min": 1.4227579832077026,
"max": 1.4399878978729248,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 8430.2666015625,
"min": 7667.666015625,
"max": 10151.478515625,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.mean": {
"value": 1.0,
"min": 0.5333333333333333,
"max": 1.5333333333333334,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.sum": {
"value": 15.0,
"min": 8.0,
"max": 23.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
"value": 8.866666666666667,
"min": 8.0,
"max": 66.86666666666666,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
"value": 133.0,
"min": 120.0,
"max": 1058.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
"value": 1.0,
"min": 0.3333333333333333,
"max": 1.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
"value": 15.0,
"min": 5.0,
"max": 18.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
"value": 52.56582450866699,
"min": 14.188481012980143,
"max": 141.06962882147894,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
"value": 788.4873676300049,
"min": 212.82721519470215,
"max": 2539.253318786621,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
"value": 6.8370773951212565,
"min": 0.12581461668014526,
"max": 10.87903167406718,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
"value": 102.55616092681885,
"min": 2.2646631002426147,
"max": 174.39273007959127,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
"value": 1.3094838937123616,
"min": 0.13647641407118904,
"max": 5.099398565292359,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
"value": 19.642258405685425,
"min": 2.4565754532814026,
"max": 88.14770412445068,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
"value": 0.13094839106003445,
"min": 0.013647641158766217,
"max": 0.5099398593107859,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
"value": 1.9642258659005165,
"min": 0.2456575408577919,
"max": 8.814770430326462,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
"value": 42.58608170747757,
"min": 22.97467691898346,
"max": 103.82445068359375,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
"value": 638.7912256121635,
"min": 344.6201537847519,
"max": 1557.3667602539062,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 393.2,
"min": 317.0,
"max": 399.0,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 5898.0,
"min": 5355.0,
"max": 7104.0,
"count": 200
},
"Agent.Step.mean": {
"value": 1199853.0,
"min": 5987.0,
"max": 1199853.0,
"count": 200
},
"Agent.Step.sum": {
"value": 1199853.0,
"min": 5987.0,
"max": 1199853.0,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.mean": {
"value": 0.2796717882156372,
"min": 0.028826581314206123,
"max": 0.8988076448440552,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.sum": {
"value": 4.195076942443848,
"min": 0.4323987066745758,
"max": 15.711792945861816,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.693463921546936,
"min": -0.07089892029762268,
"max": 2.0063657760620117,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 10.401958465576172,
"min": -1.0634838342666626,
"max": 34.108219146728516,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 8.967432228724162,
"min": -0.6734819342108334,
"max": 16.533345870673656,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 134.51148343086243,
"min": -11.449192881584167,
"max": 264.5335339307785,
"count": 200
},
"Agent.Policy.CuriosityReward.mean": {
"value": 1.1259813189506531,
"min": 0.0,
"max": 13.342773628234863,
"count": 200
},
"Agent.Policy.CuriosityReward.sum": {
"value": 16.889719784259796,
"min": 0.0,
"max": 200.14160442352295,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 8.070687929789225,
"min": -0.606133808107937,
"max": 14.880010407418013,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 121.06031894683838,
"min": -10.30427473783493,
"max": 238.0801665186882,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.022717596769022446,
"min": 0.014619453519117087,
"max": 0.033138667892975114,
"count": 134
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.022717596769022446,
"min": 0.014619453519117087,
"max": 0.033138667892975114,
"count": 134
},
"Agent.Losses.ValueLoss.mean": {
"value": 0.5063388161361217,
"min": 0.11967072682455182,
"max": 1.6857516368230183,
"count": 134
},
"Agent.Losses.ValueLoss.sum": {
"value": 0.5063388161361217,
"min": 0.11967072682455182,
"max": 1.6857516368230183,
"count": 134
},
"Agent.Policy.LearningRate.mean": {
"value": 1.5180994939999858e-06,
"min": 1.5180994939999858e-06,
"max": 0.00029780325073225,
"count": 134
},
"Agent.Policy.LearningRate.sum": {
"value": 1.5180994939999858e-06,
"min": 1.5180994939999858e-06,
"max": 0.00029780325073225,
"count": 134
},
"Agent.Policy.Epsilon.mean": {
"value": 0.100506,
"min": 0.100506,
"max": 0.19926775000000005,
"count": 134
},
"Agent.Policy.Epsilon.sum": {
"value": 0.100506,
"min": 0.100506,
"max": 0.19926775000000005,
"count": 134
},
"Agent.Policy.Beta.mean": {
"value": 3.524939999999977e-05,
"min": 3.524939999999977e-05,
"max": 0.004963460725,
"count": 134
},
"Agent.Policy.Beta.sum": {
"value": 3.524939999999977e-05,
"min": 3.524939999999977e-05,
"max": 0.004963460725,
"count": 134
},
"Agent.Losses.CuriosityForwardLoss.mean": {
"value": 0.028080025765423972,
"min": 0.026758555478105944,
"max": 0.6027635087569555,
"count": 134
},
"Agent.Losses.CuriosityForwardLoss.sum": {
"value": 0.028080025765423972,
"min": 0.026758555478105944,
"max": 0.6027635087569555,
"count": 134
},
"Agent.Losses.CuriosityInverseLoss.mean": {
"value": 2.2533761163552604,
"min": 2.1977369961915194,
"max": 3.315477500359217,
"count": 134
},
"Agent.Losses.CuriosityInverseLoss.sum": {
"value": 2.2533761163552604,
"min": 2.1977369961915194,
"max": 3.315477500359217,
"count": 134
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717441545",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_2_task_0_run_id_2_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_2_task_0_run_id_2_train --base-port 5007",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.0",
"end_time_seconds": "1717445648"
},
"total": 4103.1231244,
"count": 1,
"self": 0.2902927000004638,
"children": {
"run_training.setup": {
"total": 0.05569630000000003,
"count": 1,
"self": 0.05569630000000003
},
"TrainerController.start_learning": {
"total": 4102.7771354,
"count": 1,
"self": 8.979918300010468,
"children": {
"TrainerController._reset_env": {
"total": 2.1024739,
"count": 1,
"self": 2.1024739
},
"TrainerController.advance": {
"total": 4091.5098122999893,
"count": 401290,
"self": 8.225682399941434,
"children": {
"env_step": {
"total": 4083.284129900048,
"count": 401290,
"self": 1899.7643318000673,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2178.3107867999424,
"count": 401290,
"self": 14.252428199989936,
"children": {
"TorchPolicy.evaluate": {
"total": 2164.0583585999525,
"count": 400343,
"self": 2164.0583585999525
}
}
},
"workers": {
"total": 5.209011300038221,
"count": 401290,
"self": 0.0,
"children": {
"worker_root": {
"total": 4093.1162857999034,
"count": 401290,
"is_parallel": true,
"self": 2468.1610532999157,
"children": {
"steps_from_proto": {
"total": 0.006260000000000154,
"count": 1,
"is_parallel": true,
"self": 0.00010880000000024204,
"children": {
"_process_maybe_compressed_observation": {
"total": 0.006090599999999835,
"count": 2,
"is_parallel": true,
"self": 2.989999999969406e-05,
"children": {
"_observation_to_np_array": {
"total": 0.006060700000000141,
"count": 3,
"is_parallel": true,
"self": 3.0799999999997496e-05,
"children": {
"process_pixels": {
"total": 0.0060299000000001435,
"count": 3,
"is_parallel": true,
"self": 0.00023520000000032404,
"children": {
"image_decompress": {
"total": 0.0057946999999998194,
"count": 3,
"is_parallel": true,
"self": 0.0057946999999998194
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 6.060000000007726e-05,
"count": 2,
"is_parallel": true,
"self": 6.060000000007726e-05
}
}
},
"UnityEnvironment.step": {
"total": 1624.9489724999878,
"count": 401290,
"is_parallel": true,
"self": 22.823380899882068,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.756187400049537,
"count": 401290,
"is_parallel": true,
"self": 24.756187400049537
},
"communicator.exchange": {
"total": 1379.7467344000777,
"count": 401290,
"is_parallel": true,
"self": 1379.7467344000777
},
"steps_from_proto": {
"total": 197.62266979997844,
"count": 401290,
"is_parallel": true,
"self": 40.7011111000688,
"children": {
"_process_maybe_compressed_observation": {
"total": 140.35204550000455,
"count": 802580,
"is_parallel": true,
"self": 10.89967610011061,
"children": {
"_observation_to_np_array": {
"total": 129.45236939989394,
"count": 1204164,
"is_parallel": true,
"self": 10.358699900092162,
"children": {
"process_pixels": {
"total": 119.09366949980178,
"count": 1204164,
"is_parallel": true,
"self": 56.02922529970019,
"children": {
"image_decompress": {
"total": 63.06444420010159,
"count": 1204164,
"is_parallel": true,
"self": 63.06444420010159
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 16.56951319990508,
"count": 802580,
"is_parallel": true,
"self": 16.56951319990508
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.419999939069385e-05,
"count": 1,
"self": 3.419999939069385e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 4098.314436299989,
"count": 202075,
"is_parallel": true,
"self": 6.660855800016634,
"children": {
"process_trajectory": {
"total": 3297.881800399975,
"count": 202075,
"is_parallel": true,
"self": 3297.385383799975,
"children": {
"RLTrainer._checkpoint": {
"total": 0.496416599999975,
"count": 2,
"is_parallel": true,
"self": 0.496416599999975
}
}
},
"_update_policy": {
"total": 793.7717800999977,
"count": 134,
"is_parallel": true,
"self": 538.7700476999878,
"children": {
"TorchPPOOptimizer.update": {
"total": 255.00173240000998,
"count": 3354,
"is_parallel": true,
"self": 255.00173240000998
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.18489670000053593,
"count": 1,
"self": 0.006239500000447151,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17865720000008878,
"count": 1,
"self": 0.17865720000008878
}
}
}
}
}
}
}