{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 2.8154773712158203,
"min": 2.8154773712158203,
"max": 2.875230312347412,
"count": 2
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 29019.125,
"min": 29019.125,
"max": 29445.234375,
"count": 2
},
"SnowballTarget.Step.mean": {
"value": 19992.0,
"min": 9952.0,
"max": 19992.0,
"count": 2
},
"SnowballTarget.Step.sum": {
"value": 19992.0,
"min": 9952.0,
"max": 19992.0,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 1.4100492000579834,
"min": 0.427836149930954,
"max": 1.4100492000579834,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 289.0600891113281,
"min": 83.00021362304688,
"max": 289.0600891113281,
"count": 2
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 2
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 2
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06787747711986965,
"min": 0.06482923300951798,
"max": 0.06787747711986965,
"count": 2
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3393873855993482,
"min": 0.25931693203807193,
"max": 0.3393873855993482,
"count": 2
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19812133379426658,
"min": 0.11161829259100498,
"max": 0.19812133379426658,
"count": 2
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9906066689713329,
"min": 0.4464731703640199,
"max": 0.9906066689713329,
"count": 2
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.032007656e-05,
"min": 7.032007656e-05,
"max": 0.00021882002706000002,
"count": 2
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0003516003828,
"min": 0.0003516003828,
"max": 0.0008752801082400001,
"count": 2
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.12344000000000002,
"min": 0.12344000000000002,
"max": 0.17294,
"count": 2
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.6172000000000001,
"min": 0.6172000000000001,
"max": 0.69176,
"count": 2
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0011796560000000003,
"min": 0.0011796560000000003,
"max": 0.003649706,
"count": 2
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.005898280000000001,
"min": 0.005898280000000001,
"max": 0.014598824,
"count": 2
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 5.909090909090909,
"min": 2.9318181818181817,
"max": 5.909090909090909,
"count": 2
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 325.0,
"min": 129.0,
"max": 325.0,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 5.909090909090909,
"min": 2.9318181818181817,
"max": 5.909090909090909,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 325.0,
"min": 129.0,
"max": 325.0,
"count": 2
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673805666",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673805719"
},
"total": 53.054175661000016,
"count": 1,
"self": 0.3881745330000115,
"children": {
"run_training.setup": {
"total": 0.1114968600000168,
"count": 1,
"self": 0.1114968600000168
},
"TrainerController.start_learning": {
"total": 52.55450426799999,
"count": 1,
"self": 0.08429074299971262,
"children": {
"TrainerController._reset_env": {
"total": 9.41487313899998,
"count": 1,
"self": 9.41487313899998
},
"TrainerController.advance": {
"total": 42.924624938000306,
"count": 1869,
"self": 0.02837261900160115,
"children": {
"env_step": {
"total": 42.896252318998705,
"count": 1869,
"self": 28.051600423999417,
"children": {
"SubprocessEnvManager._take_step": {
"total": 14.813741126998593,
"count": 1869,
"self": 0.14230745399831335,
"children": {
"TorchPolicy.evaluate": {
"total": 14.67143367300028,
"count": 1869,
"self": 3.0769798910006045,
"children": {
"TorchPolicy.sample_actions": {
"total": 11.594453781999675,
"count": 1869,
"self": 11.594453781999675
}
}
}
}
},
"workers": {
"total": 0.03091076800069459,
"count": 1869,
"self": 0.0,
"children": {
"worker_root": {
"total": 52.384632305000764,
"count": 1869,
"is_parallel": true,
"self": 29.874482992999503,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008841642999982469,
"count": 1,
"is_parallel": true,
"self": 0.00648357600002214,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023580669999603288,
"count": 10,
"is_parallel": true,
"self": 0.0023580669999603288
}
}
},
"UnityEnvironment.step": {
"total": 0.033110827000001564,
"count": 1,
"is_parallel": true,
"self": 0.0003681319999770949,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00031754700000874436,
"count": 1,
"is_parallel": true,
"self": 0.00031754700000874436
},
"communicator.exchange": {
"total": 0.03057126199999516,
"count": 1,
"is_parallel": true,
"self": 0.03057126199999516
},
"steps_from_proto": {
"total": 0.001853886000020566,
"count": 1,
"is_parallel": true,
"self": 0.00043334899996239074,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014205370000581752,
"count": 10,
"is_parallel": true,
"self": 0.0014205370000581752
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 22.51014931200126,
"count": 1868,
"is_parallel": true,
"self": 0.8672138890012207,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.5257180250004012,
"count": 1868,
"is_parallel": true,
"self": 0.5257180250004012
},
"communicator.exchange": {
"total": 17.926169329999198,
"count": 1868,
"is_parallel": true,
"self": 17.926169329999198
},
"steps_from_proto": {
"total": 3.191048068000441,
"count": 1868,
"is_parallel": true,
"self": 0.6828028759951223,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2.508245192005319,
"count": 18680,
"is_parallel": true,
"self": 2.508245192005319
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.2361999987861054e-05,
"count": 1,
"self": 4.2361999987861054e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 42.65500256499868,
"count": 35510,
"is_parallel": true,
"self": 0.926596289999793,
"children": {
"process_trajectory": {
"total": 24.667324749998983,
"count": 35510,
"is_parallel": true,
"self": 24.667324749998983
},
"_update_policy": {
"total": 17.061081524999906,
"count": 9,
"is_parallel": true,
"self": 4.255934738000406,
"children": {
"TorchPPOOptimizer.update": {
"total": 12.8051467869995,
"count": 456,
"is_parallel": true,
"self": 12.8051467869995
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1306730860000016,
"count": 1,
"self": 0.0008738569999877654,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12979922900001384,
"count": 1,
"self": 0.12979922900001384
}
}
}
}
}
}
}