ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4046720266342163,
"min": 1.4046720266342163,
"max": 1.4259588718414307,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70534.203125,
"min": 69282.03125,
"max": 78078.953125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 78.01895734597156,
"min": 72.39296187683284,
"max": 385.5076923076923,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49386.0,
"min": 49193.0,
"max": 50116.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999993.0,
"min": 49859.0,
"max": 1999993.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999993.0,
"min": 49859.0,
"max": 1999993.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.5029895305633545,
"min": 0.1312311887741089,
"max": 2.5220463275909424,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1584.392333984375,
"min": 16.928823471069336,
"max": 1720.0355224609375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.971859020926941,
"min": 1.767885948798453,
"max": 4.05609040976436,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2514.1867602467537,
"min": 228.05728739500046,
"max": 2679.3828971385956,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.971859020926941,
"min": 1.767885948798453,
"max": 4.05609040976436,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2514.1867602467537,
"min": 228.05728739500046,
"max": 2679.3828971385956,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015895140574624142,
"min": 0.013456903446422076,
"max": 0.020813762316417222,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.047685421723872426,
"min": 0.02691380689284415,
"max": 0.05735021146635215,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05925275199115276,
"min": 0.020371780544519425,
"max": 0.06188846913476785,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1777582559734583,
"min": 0.04074356108903885,
"max": 0.1777582559734583,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.850948716383328e-06,
"min": 3.850948716383328e-06,
"max": 0.0002952927015691001,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1552846149149984e-05,
"min": 1.1552846149149984e-05,
"max": 0.0008439681186772999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10128361666666667,
"min": 0.10128361666666667,
"max": 0.1984309,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30385085,
"min": 0.20770309999999997,
"max": 0.5813227000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.405247166666655e-05,
"min": 7.405247166666655e-05,
"max": 0.0049217019099999995,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022215741499999965,
"min": 0.00022215741499999965,
"max": 0.014068002729999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670478697",
"python_version": "3.8.15 (default, Oct 12 2022, 19:14:39) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670481035"
},
"total": 2338.231583319,
"count": 1,
"self": 0.44553015999963463,
"children": {
"run_training.setup": {
"total": 0.178898947000107,
"count": 1,
"self": 0.178898947000107
},
"TrainerController.start_learning": {
"total": 2337.6071542120003,
"count": 1,
"self": 4.023166821895757,
"children": {
"TrainerController._reset_env": {
"total": 11.833396259999972,
"count": 1,
"self": 11.833396259999972
},
"TrainerController.advance": {
"total": 2321.622105226104,
"count": 233568,
"self": 4.3891737380495215,
"children": {
"env_step": {
"total": 1818.6086585449839,
"count": 233568,
"self": 1527.4998364027902,
"children": {
"SubprocessEnvManager._take_step": {
"total": 288.43525974914655,
"count": 233568,
"self": 15.07611909402749,
"children": {
"TorchPolicy.evaluate": {
"total": 273.35914065511906,
"count": 222978,
"self": 68.68870773709477,
"children": {
"TorchPolicy.sample_actions": {
"total": 204.6704329180243,
"count": 222978,
"self": 204.6704329180243
}
}
}
}
},
"workers": {
"total": 2.6735623930471775,
"count": 233568,
"self": 0.0,
"children": {
"worker_root": {
"total": 2329.6145393920233,
"count": 233568,
"is_parallel": true,
"self": 1074.3235312100965,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021281089998410607,
"count": 1,
"is_parallel": true,
"self": 0.00038618799999312614,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017419209998479346,
"count": 2,
"is_parallel": true,
"self": 0.0017419209998479346
}
}
},
"UnityEnvironment.step": {
"total": 0.0285908500000005,
"count": 1,
"is_parallel": true,
"self": 0.0002866210002139269,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019668100003400468,
"count": 1,
"is_parallel": true,
"self": 0.00019668100003400468
},
"communicator.exchange": {
"total": 0.02735693899967373,
"count": 1,
"is_parallel": true,
"self": 0.02735693899967373
},
"steps_from_proto": {
"total": 0.000750609000078839,
"count": 1,
"is_parallel": true,
"self": 0.0002639260001160437,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004866829999627953,
"count": 2,
"is_parallel": true,
"self": 0.0004866829999627953
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1255.2910081819268,
"count": 233567,
"is_parallel": true,
"self": 35.35802618060006,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.68856884720662,
"count": 233567,
"is_parallel": true,
"self": 77.68856884720662
},
"communicator.exchange": {
"total": 1046.3223892540377,
"count": 233567,
"is_parallel": true,
"self": 1046.3223892540377
},
"steps_from_proto": {
"total": 95.92202390008242,
"count": 233567,
"is_parallel": true,
"self": 39.71906769517682,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.202956204905604,
"count": 467134,
"is_parallel": true,
"self": 56.202956204905604
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 498.62427294307054,
"count": 233568,
"self": 6.1079921240348085,
"children": {
"process_trajectory": {
"total": 161.45417828703148,
"count": 233568,
"self": 160.95831382303186,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4958644639996237,
"count": 4,
"self": 0.4958644639996237
}
}
},
"_update_policy": {
"total": 331.06210253200425,
"count": 97,
"self": 276.57440422398486,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.48769830801939,
"count": 2910,
"self": 54.48769830801939
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.590003173798323e-07,
"count": 1,
"self": 9.590003173798323e-07
},
"TrainerController._save_models": {
"total": 0.12848494500030938,
"count": 1,
"self": 0.002894036000725464,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12559090899958392,
"count": 1,
"self": 0.12559090899958392
}
}
}
}
}
}
}