{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 1.4305307865142822,
"min": 1.4179251194000244,
"max": 1.4377363920211792,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 8402.9375,
"min": 7787.95751953125,
"max": 10217.5185546875,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.mean": {
"value": 1.0666666666666667,
"min": 0.3333333333333333,
"max": 1.4,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.sum": {
"value": 16.0,
"min": 6.0,
"max": 24.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
"value": 10.8,
"min": 8.266666666666667,
"max": 52.733333333333334,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
"value": 162.0,
"min": 124.0,
"max": 935.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
"value": 0.8666666666666667,
"min": 0.4,
"max": 1.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
"value": 13.0,
"min": 6.0,
"max": 18.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
"value": 64.00649248758951,
"min": 8.590577337476942,
"max": 109.97190755208334,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
"value": 960.0973873138428,
"min": 154.63039207458496,
"max": 1649.57861328125,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
"value": 8.116427787144978,
"min": 1.3067185004552206,
"max": 13.148702176411946,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
"value": 121.74641680717468,
"min": 19.600777506828308,
"max": 197.2305326461792,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
"value": 2.2712233940760296,
"min": 0.26030529538790387,
"max": 5.10064845085144,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
"value": 34.06835091114044,
"min": 4.466096103191376,
"max": 76.5097267627716,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
"value": 0.22712233712275823,
"min": 0.0260305292904377,
"max": 0.5100648482640584,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
"value": 3.4068350568413734,
"min": 0.44660961627960205,
"max": 7.6509727239608765,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
"value": 52.35421053568522,
"min": 14.488178213437399,
"max": 111.25537077585857,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
"value": 785.3131580352783,
"min": 260.78720784187317,
"max": 2002.596673965454,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 390.6,
"min": 312.8333333333333,
"max": 399.0,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 5859.0,
"min": 5457.0,
"max": 7146.0,
"count": 200
},
"Agent.Step.mean": {
"value": 1199677.0,
"min": 5987.0,
"max": 1199677.0,
"count": 200
},
"Agent.Step.sum": {
"value": 1199677.0,
"min": 5987.0,
"max": 1199677.0,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.mean": {
"value": 0.5442361831665039,
"min": 0.029143445193767548,
"max": 0.9844069480895996,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.sum": {
"value": 8.163542747497559,
"min": 0.4371516704559326,
"max": 15.750511169433594,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8943437337875366,
"min": -0.0026478245854377747,
"max": 1.2996344566345215,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 13.415156364440918,
"min": -0.03971736878156662,
"max": 19.494516372680664,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 11.246434267361959,
"min": 0.49790438810984294,
"max": 18.70549505551656,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 168.69651401042938,
"min": 7.468565821647644,
"max": 280.5824258327484,
"count": 200
},
"Agent.Policy.CuriosityReward.mean": {
"value": 1.7330764293670655,
"min": 0.0,
"max": 13.387562370300293,
"count": 200
},
"Agent.Policy.CuriosityReward.sum": {
"value": 25.99614644050598,
"min": 0.0,
"max": 200.8134355545044,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 10.121790488560995,
"min": 0.4481136004130046,
"max": 16.83494649330775,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 151.82685732841492,
"min": 6.721704006195068,
"max": 252.52419739961624,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.019706871533007535,
"min": 0.014022891050747907,
"max": 0.03550837947598969,
"count": 137
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.019706871533007535,
"min": 0.014022891050747907,
"max": 0.03550837947598969,
"count": 137
},
"Agent.Losses.ValueLoss.mean": {
"value": 0.7534819333641617,
"min": 0.13195061466346183,
"max": 1.552942191561063,
"count": 137
},
"Agent.Losses.ValueLoss.sum": {
"value": 0.7534819333641617,
"min": 0.13195061466346183,
"max": 1.552942191561063,
"count": 137
},
"Agent.Policy.LearningRate.mean": {
"value": 1.052349649249999e-06,
"min": 1.052349649249999e-06,
"max": 0.00029780325073225,
"count": 137
},
"Agent.Policy.LearningRate.sum": {
"value": 1.052349649249999e-06,
"min": 1.052349649249999e-06,
"max": 0.00029780325073225,
"count": 137
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10035075000000002,
"min": 0.10035075000000002,
"max": 0.19926775000000005,
"count": 137
},
"Agent.Policy.Epsilon.sum": {
"value": 0.10035075000000002,
"min": 0.10035075000000002,
"max": 0.19926775000000005,
"count": 137
},
"Agent.Policy.Beta.mean": {
"value": 2.750242499999999e-05,
"min": 2.750242499999999e-05,
"max": 0.004963460725,
"count": 137
},
"Agent.Policy.Beta.sum": {
"value": 2.750242499999999e-05,
"min": 2.750242499999999e-05,
"max": 0.004963460725,
"count": 137
},
"Agent.Losses.CuriosityForwardLoss.mean": {
"value": 0.04294379772963347,
"min": 0.03619113999108473,
"max": 0.6027635087569555,
"count": 137
},
"Agent.Losses.CuriosityForwardLoss.sum": {
"value": 0.04294379772963347,
"min": 0.03619113999108473,
"max": 0.6027635087569555,
"count": 137
},
"Agent.Losses.CuriosityInverseLoss.mean": {
"value": 1.6761144090581823,
"min": 1.639620304107666,
"max": 3.315477500359217,
"count": 137
},
"Agent.Losses.CuriosityInverseLoss.sum": {
"value": 1.6761144090581823,
"min": 1.639620304107666,
"max": 3.315477500359217,
"count": 137
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717777508",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_10_task_0_run_id_2_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_10_task_0_run_id_2_train --base-port 5007",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.0",
"end_time_seconds": "1717780972"
},
"total": 3463.8833852999996,
"count": 1,
"self": 0.2630401999995229,
"children": {
"run_training.setup": {
"total": 0.04985329999999999,
"count": 1,
"self": 0.04985329999999999
},
"TrainerController.start_learning": {
"total": 3463.5704918,
"count": 1,
"self": 5.068986999986009,
"children": {
"TrainerController._reset_env": {
"total": 2.0386174,
"count": 1,
"self": 2.0386174
},
"TrainerController.advance": {
"total": 3456.289249100014,
"count": 401100,
"self": 4.628672400080177,
"children": {
"env_step": {
"total": 3451.6605766999337,
"count": 401100,
"self": 1503.283720300047,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1945.243238499896,
"count": 401100,
"self": 10.410247899865453,
"children": {
"TorchPolicy.evaluate": {
"total": 1934.8329906000306,
"count": 400160,
"self": 1934.8329906000306
}
}
},
"workers": {
"total": 3.1336178999907576,
"count": 401100,
"self": 0.0,
"children": {
"worker_root": {
"total": 3456.7936839999793,
"count": 401100,
"is_parallel": true,
"self": 2138.8945531000036,
"children": {
"steps_from_proto": {
"total": 0.006685200000000169,
"count": 1,
"is_parallel": true,
"self": 0.00010800000000021903,
"children": {
"_process_maybe_compressed_observation": {
"total": 0.006530399999999936,
"count": 2,
"is_parallel": true,
"self": 4.0900000000121395e-05,
"children": {
"_observation_to_np_array": {
"total": 0.006489499999999815,
"count": 3,
"is_parallel": true,
"self": 4.129999999968881e-05,
"children": {
"process_pixels": {
"total": 0.006448200000000126,
"count": 3,
"is_parallel": true,
"self": 0.00024410000000019139,
"children": {
"image_decompress": {
"total": 0.006204099999999935,
"count": 3,
"is_parallel": true,
"self": 0.006204099999999935
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 4.68000000000135e-05,
"count": 2,
"is_parallel": true,
"self": 4.68000000000135e-05
}
}
},
"UnityEnvironment.step": {
"total": 1317.8924456999757,
"count": 401100,
"is_parallel": true,
"self": 17.111256100049104,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 19.5919268999856,
"count": 401100,
"is_parallel": true,
"self": 19.5919268999856
},
"communicator.exchange": {
"total": 1142.8874947000145,
"count": 401100,
"is_parallel": true,
"self": 1142.8874947000145
},
"steps_from_proto": {
"total": 138.30176799992657,
"count": 401100,
"is_parallel": true,
"self": 27.261460699962967,
"children": {
"_process_maybe_compressed_observation": {
"total": 99.4648361999284,
"count": 802200,
"is_parallel": true,
"self": 7.785914999949924,
"children": {
"_observation_to_np_array": {
"total": 91.67892119997848,
"count": 1203573,
"is_parallel": true,
"self": 7.851400799969497,
"children": {
"process_pixels": {
"total": 83.82752040000898,
"count": 1203573,
"is_parallel": true,
"self": 39.75448050011749,
"children": {
"image_decompress": {
"total": 44.07303989989149,
"count": 1203573,
"is_parallel": true,
"self": 44.07303989989149
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 11.5754711000352,
"count": 802200,
"is_parallel": true,
"self": 11.5754711000352
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.3400000120309414e-05,
"count": 1,
"self": 3.3400000120309414e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 3459.4188590000413,
"count": 166188,
"is_parallel": true,
"self": 5.475295599989295,
"children": {
"process_trajectory": {
"total": 2714.897038600053,
"count": 166188,
"is_parallel": true,
"self": 2714.497464300053,
"children": {
"RLTrainer._checkpoint": {
"total": 0.39957430000004024,
"count": 2,
"is_parallel": true,
"self": 0.39957430000004024
}
}
},
"_update_policy": {
"total": 739.0465247999991,
"count": 137,
"is_parallel": true,
"self": 489.2955001999993,
"children": {
"TorchPPOOptimizer.update": {
"total": 249.75102459999982,
"count": 3408,
"is_parallel": true,
"self": 249.75102459999982
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.17360490000010032,
"count": 1,
"self": 0.006113800000093761,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16749110000000655,
"count": 1,
"self": 0.16749110000000655
}
}
}
}
}
}
}