{
    "name": "root",
    "gauges": {
        "SnowballTarget.Policy.Entropy.mean": {
            "value": 1.1833347082138062,
            "min": 1.1833347082138062,
            "max": 2.8769798278808594,
            "count": 20
        },
        "SnowballTarget.Policy.Entropy.sum": {
            "value": 11324.513671875,
            "min": 11324.513671875,
            "max": 29526.443359375,
            "count": 20
        },
        "SnowballTarget.Step.mean": {
            "value": 199984.0,
            "min": 9952.0,
            "max": 199984.0,
            "count": 20
        },
        "SnowballTarget.Step.sum": {
            "value": 199984.0,
            "min": 9952.0,
            "max": 199984.0,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
            "value": 11.113080978393555,
            "min": 0.07073356211185455,
            "max": 11.113080978393555,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
            "value": 2167.05078125,
            "min": 13.722311019897461,
            "max": 2194.4580078125,
            "count": 20
        },
        "SnowballTarget.Environment.EpisodeLength.mean": {
            "value": 199.0,
            "min": 199.0,
            "max": 199.0,
            "count": 20
        },
        "SnowballTarget.Environment.EpisodeLength.sum": {
            "value": 8756.0,
            "min": 8756.0,
            "max": 10945.0,
            "count": 20
        },
        "SnowballTarget.Environment.CumulativeReward.mean": {
            "value": 23.113636363636363,
            "min": 3.25,
            "max": 23.29090909090909,
            "count": 20
        },
        "SnowballTarget.Environment.CumulativeReward.sum": {
            "value": 1017.0,
            "min": 143.0,
            "max": 1281.0,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicReward.mean": {
            "value": 23.113636363636363,
            "min": 3.25,
            "max": 23.29090909090909,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicReward.sum": {
            "value": 1017.0,
            "min": 143.0,
            "max": 1281.0,
            "count": 20
        },
        "SnowballTarget.Losses.PolicyLoss.mean": {
            "value": 0.07164471858271224,
            "min": 0.062384990135672294,
            "max": 0.07585342966984149,
            "count": 20
        },
        "SnowballTarget.Losses.PolicyLoss.sum": {
            "value": 0.14328943716542447,
            "min": 0.12476998027134459,
            "max": 0.22756028900952446,
            "count": 20
        },
        "SnowballTarget.Losses.ValueLoss.mean": {
            "value": 0.21838094893039442,
            "min": 0.11401407709937328,
            "max": 0.27878005671150546,
            "count": 20
        },
        "SnowballTarget.Losses.ValueLoss.sum": {
            "value": 0.43676189786078884,
            "min": 0.22802815419874656,
            "max": 0.8363401701345163,
            "count": 20
        },
        "SnowballTarget.Policy.LearningRate.mean": {
            "value": 6.432097856000005e-06,
            "min": 6.432097856000005e-06,
            "max": 0.000290232003256,
            "count": 20
        },
        "SnowballTarget.Policy.LearningRate.sum": {
            "value": 1.286419571200001e-05,
            "min": 1.286419571200001e-05,
            "max": 0.0007419960526679999,
            "count": 20
        },
        "SnowballTarget.Policy.Epsilon.mean": {
            "value": 0.10214399999999998,
            "min": 0.10214399999999998,
            "max": 0.19674400000000003,
            "count": 20
        },
        "SnowballTarget.Policy.Epsilon.sum": {
            "value": 0.20428799999999997,
            "min": 0.20428799999999997,
            "max": 0.5473319999999999,
            "count": 20
        },
        "SnowballTarget.Policy.Beta.mean": {
            "value": 0.00011698560000000007,
            "min": 0.00011698560000000007,
            "max": 0.004837525599999998,
            "count": 20
        },
        "SnowballTarget.Policy.Beta.sum": {
            "value": 0.00023397120000000015,
            "min": 0.00023397120000000015,
            "max": 0.0123718668,
            "count": 20
        },
        "SnowballTarget.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 20
        },
        "SnowballTarget.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 20
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1690819468",
        "python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
        "mlagents_version": "0.31.0.dev0",
        "mlagents_envs_version": "0.31.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "1.11.0+cu102",
        "numpy_version": "1.21.2",
        "end_time_seconds": "1690820144"
    },
    "total": 676.065579782,
    "count": 1,
    "self": 0.5393505800000185,
    "children": {
        "run_training.setup": {
            "total": 0.05110353399999212,
            "count": 1,
            "self": 0.05110353399999212
        },
        "TrainerController.start_learning": {
            "total": 675.475125668,
            "count": 1,
            "self": 0.8121483389900277,
            "children": {
                "TrainerController._reset_env": {
                    "total": 2.1541794759999675,
                    "count": 1,
                    "self": 2.1541794759999675
                },
                "TrainerController.advance": {
                    "total": 672.29147469701,
                    "count": 18205,
                    "self": 0.3956082730072694,
                    "children": {
                        "env_step": {
                            "total": 671.8958664240027,
                            "count": 18205,
                            "self": 562.3773612950133,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 109.10618123799469,
                                    "count": 18205,
                                    "self": 2.844499962000839,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 106.26168127599385,
                                            "count": 18205,
                                            "self": 106.26168127599385
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 0.4123238909946849,
                                    "count": 18205,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 673.1190867859951,
                                            "count": 18205,
                                            "is_parallel": true,
                                            "self": 344.1748832589853,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.004471604000002571,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0026129749999199703,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.0018586290000826011,
                                                                    "count": 10,
                                                                    "is_parallel": true,
                                                                    "self": 0.0018586290000826011
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.04600165899995545,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0008015829999976631,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.0004565360000015062,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0004565360000015062
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.04216569499999423,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.04216569499999423
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.0025778449999620534,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0004778019998639138,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.0021000430000981396,
                                                                            "count": 10,
                                                                            "is_parallel": true,
                                                                            "self": 0.0021000430000981396
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 328.94420352700985,
                                                    "count": 18204,
                                                    "is_parallel": true,
                                                    "self": 14.68993989699942,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 7.887491835007097,
                                                            "count": 18204,
                                                            "is_parallel": true,
                                                            "self": 7.887491835007097
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 258.98359614699643,
                                                            "count": 18204,
                                                            "is_parallel": true,
                                                            "self": 258.98359614699643
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 47.3831756480069,
                                                            "count": 18204,
                                                            "is_parallel": true,
                                                            "self": 9.443403564034838,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 37.93977208397206,
                                                                    "count": 182040,
                                                                    "is_parallel": true,
                                                                    "self": 37.93977208397206
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 0.00022650799996881688,
                    "count": 1,
                    "self": 0.00022650799996881688,
                    "children": {
                        "thread_root": {
                            "total": 0.0,
                            "count": 0,
                            "is_parallel": true,
                            "self": 0.0,
                            "children": {
                                "trainer_advance": {
                                    "total": 665.5515257189005,
                                    "count": 728883,
                                    "is_parallel": true,
                                    "self": 17.463047892928103,
                                    "children": {
                                        "process_trajectory": {
                                            "total": 385.9351429879723,
                                            "count": 728883,
                                            "is_parallel": true,
                                            "self": 384.6503077559722,
                                            "children": {
                                                "RLTrainer._checkpoint": {
                                                    "total": 1.2848352320000913,
                                                    "count": 4,
                                                    "is_parallel": true,
                                                    "self": 1.2848352320000913
                                                }
                                            }
                                        },
                                        "_update_policy": {
                                            "total": 262.15333483800015,
                                            "count": 45,
                                            "is_parallel": true,
                                            "self": 84.49116405400014,
                                            "children": {
                                                "TorchPPOOptimizer.update": {
                                                    "total": 177.662170784,
                                                    "count": 4587,
                                                    "is_parallel": true,
                                                    "self": 177.662170784
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "TrainerController._save_models": {
                    "total": 0.21709664800005157,
                    "count": 1,
                    "self": 0.004119508000030692,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.21297714000002088,
                            "count": 1,
                            "self": 0.21297714000002088
                        }
                    }
                }
            }
        }
    }
}