ppo-Pyramids/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4974146783351898,
"min": 0.46742406487464905,
"max": 1.496902346611023,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14898.564453125,
"min": 13925.498046875,
"max": 45410.03125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989948.0,
"min": 29952.0,
"max": 989948.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989948.0,
"min": 29952.0,
"max": 989948.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4042978286743164,
"min": -0.10191190242767334,
"max": 0.45109280943870544,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 110.37330627441406,
"min": -24.560768127441406,
"max": 120.44178009033203,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.06810474395751953,
"min": -0.03129679709672928,
"max": 0.30708253383636475,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 18.592594146728516,
"min": -7.9493865966796875,
"max": 74.31397247314453,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0663178983723767,
"min": 0.06520124614083518,
"max": 0.07302835883079593,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9284505772132738,
"min": 0.5056778671130961,
"max": 1.0303814954457657,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017021256550617096,
"min": 0.0008146566462248917,
"max": 0.017021256550617096,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23829759170863932,
"min": 0.011405193047148484,
"max": 0.23829759170863932,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.568840334228572e-06,
"min": 7.568840334228572e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010596376467920001,
"min": 0.00010596376467920001,
"max": 0.0036333166888944998,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252291428571429,
"min": 0.10252291428571429,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4353208000000002,
"min": 1.3886848,
"max": 2.6111055000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026203913714285717,
"min": 0.00026203913714285717,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00366854792,
"min": 0.00366854792,
"max": 0.12112943945000003,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013958967290818691,
"min": 0.013958967290818691,
"max": 0.49822303652763367,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19542554020881653,
"min": 0.19542554020881653,
"max": 3.4875612258911133,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 430.65277777777777,
"min": 421.6857142857143,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31007.0,
"min": 15984.0,
"max": 33554.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4582027554925945,
"min": -1.0000000521540642,
"max": 1.521159974166325,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 104.9905983954668,
"min": -28.597001545131207,
"max": 106.48119819164276,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4582027554925945,
"min": -1.0000000521540642,
"max": 1.521159974166325,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 104.9905983954668,
"min": -28.597001545131207,
"max": 106.48119819164276,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06229965716597184,
"min": 0.06169865586603659,
"max": 10.574366303160787,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.485575315949973,
"min": 4.318905910622561,
"max": 169.1898608505726,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1764590669",
"python_version": "3.10.6 | packaged by conda-forge | (main, Aug 22 2022, 20:35:26) [GCC 10.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1764593027"
},
"total": 2357.4462465429997,
"count": 1,
"self": 0.6822104259990738,
"children": {
"run_training.setup": {
"total": 0.033529772000292724,
"count": 1,
"self": 0.033529772000292724
},
"TrainerController.start_learning": {
"total": 2356.7305063450003,
"count": 1,
"self": 1.6479185770303957,
"children": {
"TrainerController._reset_env": {
"total": 2.5253059189999476,
"count": 1,
"self": 2.5253059189999476
},
"TrainerController.advance": {
"total": 2352.47396804497,
"count": 63562,
"self": 1.855875773990192,
"children": {
"env_step": {
"total": 1655.3416131259119,
"count": 63562,
"self": 1484.8434439579887,
"children": {
"SubprocessEnvManager._take_step": {
"total": 169.43014482714898,
"count": 63562,
"self": 5.213715154056445,
"children": {
"TorchPolicy.evaluate": {
"total": 164.21642967309253,
"count": 62559,
"self": 164.21642967309253
}
}
},
"workers": {
"total": 1.0680243407741727,
"count": 63562,
"self": 0.0,
"children": {
"worker_root": {
"total": 2349.517061511885,
"count": 63562,
"is_parallel": true,
"self": 998.9021128497116,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0026042959998449078,
"count": 1,
"is_parallel": true,
"self": 0.0007764569991195458,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001827839000725362,
"count": 8,
"is_parallel": true,
"self": 0.001827839000725362
}
}
},
"UnityEnvironment.step": {
"total": 0.050039310999636655,
"count": 1,
"is_parallel": true,
"self": 0.0005490039993674145,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045168199994805036,
"count": 1,
"is_parallel": true,
"self": 0.00045168199994805036
},
"communicator.exchange": {
"total": 0.04724044100021274,
"count": 1,
"is_parallel": true,
"self": 0.04724044100021274
},
"steps_from_proto": {
"total": 0.0017981840001084493,
"count": 1,
"is_parallel": true,
"self": 0.0004797269984919694,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013184570016164798,
"count": 8,
"is_parallel": true,
"self": 0.0013184570016164798
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1350.6149486621734,
"count": 63561,
"is_parallel": true,
"self": 36.2927346902743,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.63818100607932,
"count": 63561,
"is_parallel": true,
"self": 23.63818100607932
},
"communicator.exchange": {
"total": 1176.231023491874,
"count": 63561,
"is_parallel": true,
"self": 1176.231023491874
},
"steps_from_proto": {
"total": 114.45300947394571,
"count": 63561,
"is_parallel": true,
"self": 25.026604518810927,
"children": {
"_process_rank_one_or_two_observation": {
"total": 89.42640495513479,
"count": 508488,
"is_parallel": true,
"self": 89.42640495513479
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 695.2764791450682,
"count": 63562,
"self": 3.323459063998598,
"children": {
"process_trajectory": {
"total": 134.7056308500596,
"count": 63562,
"self": 134.51638966705923,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18924118300037662,
"count": 2,
"self": 0.18924118300037662
}
}
},
"_update_policy": {
"total": 557.24738923101,
"count": 453,
"self": 308.628019144021,
"children": {
"TorchPPOOptimizer.update": {
"total": 248.61937008698897,
"count": 22752,
"self": 248.61937008698897
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.850001904647797e-07,
"count": 1,
"self": 8.850001904647797e-07
},
"TrainerController._save_models": {
"total": 0.0833129189995816,
"count": 1,
"self": 0.0012929419999636593,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08201997699961794,
"count": 1,
"self": 0.08201997699961794
}
}
}
}
}
}
}