{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4082987308502197,
"min": 1.4082987308502197,
"max": 1.428156852722168,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71872.5234375,
"min": 69048.828125,
"max": 77726.359375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 83.43001686340641,
"min": 75.51454823889739,
"max": 386.19847328244276,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49474.0,
"min": 48849.0,
"max": 50592.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999982.0,
"min": 49974.0,
"max": 1999982.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999982.0,
"min": 49974.0,
"max": 1999982.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.399160623550415,
"min": 0.09917344897985458,
"max": 2.524322986602783,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1422.7022705078125,
"min": 12.892548561096191,
"max": 1574.64599609375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7327632797508175,
"min": 1.6438754010658998,
"max": 4.0376436045174025,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2213.528624892235,
"min": 213.70380213856697,
"max": 2556.865726828575,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7327632797508175,
"min": 1.6438754010658998,
"max": 4.0376436045174025,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2213.528624892235,
"min": 213.70380213856697,
"max": 2556.865726828575,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016470587918754772,
"min": 0.012531998450261502,
"max": 0.019990008768662745,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04941176375626431,
"min": 0.025063996900523005,
"max": 0.05997002630598823,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.055000965876711744,
"min": 0.022043215793867907,
"max": 0.059881780420740445,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16500289763013523,
"min": 0.044086431587735814,
"max": 0.17280960095425446,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.7095987634999983e-06,
"min": 3.7095987634999983e-06,
"max": 0.000295340926553025,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1128796290499995e-05,
"min": 1.1128796290499995e-05,
"max": 0.0008440404186531999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1012365,
"min": 0.1012365,
"max": 0.198446975,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30370949999999997,
"min": 0.20765895,
"max": 0.5813467999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.170135e-05,
"min": 7.170135e-05,
"max": 0.0049225040525,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021510405,
"min": 0.00021510405,
"max": 0.01406920532,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685281590",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685284061"
},
"total": 2471.822751822,
"count": 1,
"self": 0.43651347600007284,
"children": {
"run_training.setup": {
"total": 0.07163837299998477,
"count": 1,
"self": 0.07163837299998477
},
"TrainerController.start_learning": {
"total": 2471.314599973,
"count": 1,
"self": 4.5602248400387,
"children": {
"TrainerController._reset_env": {
"total": 5.19379992399999,
"count": 1,
"self": 5.19379992399999
},
"TrainerController.advance": {
"total": 2461.4321647009615,
"count": 232810,
"self": 4.803370448920305,
"children": {
"env_step": {
"total": 1903.873455169032,
"count": 232810,
"self": 1611.4580651060082,
"children": {
"SubprocessEnvManager._take_step": {
"total": 289.4996763880005,
"count": 232810,
"self": 17.176405103893615,
"children": {
"TorchPolicy.evaluate": {
"total": 272.3232712841069,
"count": 223030,
"self": 272.3232712841069
}
}
},
"workers": {
"total": 2.915713675023113,
"count": 232810,
"self": 0.0,
"children": {
"worker_root": {
"total": 2463.5415256140163,
"count": 232810,
"is_parallel": true,
"self": 1146.5966194290868,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0012222339999823362,
"count": 1,
"is_parallel": true,
"self": 0.00036600499998939995,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008562289999929362,
"count": 2,
"is_parallel": true,
"self": 0.0008562289999929362
}
}
},
"UnityEnvironment.step": {
"total": 0.02956781700001443,
"count": 1,
"is_parallel": true,
"self": 0.0003339820000292093,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00024699599998712074,
"count": 1,
"is_parallel": true,
"self": 0.00024699599998712074
},
"communicator.exchange": {
"total": 0.028212362000004987,
"count": 1,
"is_parallel": true,
"self": 0.028212362000004987
},
"steps_from_proto": {
"total": 0.0007744769999931123,
"count": 1,
"is_parallel": true,
"self": 0.00020528299998545663,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005691940000076556,
"count": 2,
"is_parallel": true,
"self": 0.0005691940000076556
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1316.9449061849296,
"count": 232809,
"is_parallel": true,
"self": 39.503592145944594,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.53288551393209,
"count": 232809,
"is_parallel": true,
"self": 80.53288551393209
},
"communicator.exchange": {
"total": 1101.4910926680095,
"count": 232809,
"is_parallel": true,
"self": 1101.4910926680095
},
"steps_from_proto": {
"total": 95.41733585704341,
"count": 232809,
"is_parallel": true,
"self": 34.713347154075166,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.703988702968246,
"count": 465618,
"is_parallel": true,
"self": 60.703988702968246
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 552.7553390830092,
"count": 232810,
"self": 6.871999688973915,
"children": {
"process_trajectory": {
"total": 141.08449630203685,
"count": 232810,
"self": 139.6265124320374,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4579838699994525,
"count": 10,
"self": 1.4579838699994525
}
}
},
"_update_policy": {
"total": 404.7988430919984,
"count": 97,
"self": 343.6057765439972,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.193066548001156,
"count": 2910,
"self": 61.193066548001156
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1189999895577785e-06,
"count": 1,
"self": 1.1189999895577785e-06
},
"TrainerController._save_models": {
"total": 0.1284093889998985,
"count": 1,
"self": 0.0020783469994967163,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1263310420004018,
"count": 1,
"self": 0.1263310420004018
}
}
}
}
}
}
}