{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4229440987110138,
"min": 0.4189181327819824,
"max": 1.523527979850769,
"count": 37
},
"Pyramids.Policy.Entropy.sum": {
"value": 12613.884765625,
"min": 12520.625,
"max": 46217.74609375,
"count": 37
},
"Pyramids.Step.mean": {
"value": 1109950.0,
"min": 29952.0,
"max": 1109950.0,
"count": 37
},
"Pyramids.Step.sum": {
"value": 1109950.0,
"min": 29952.0,
"max": 1109950.0,
"count": 37
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5420405864715576,
"min": -0.11092951893806458,
"max": 0.5420405864715576,
"count": 37
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 149.0611572265625,
"min": -26.84494400024414,
"max": 149.78140258789062,
"count": 37
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.005026669707149267,
"min": -0.003389290999621153,
"max": 0.42502084374427795,
"count": 37
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.3823341131210327,
"min": -0.9049407243728638,
"max": 100.72994232177734,
"count": 37
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07108240525513113,
"min": 0.06528723666985735,
"max": 0.0742275493539637,
"count": 37
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9951536735718357,
"min": 0.47906342694260196,
"max": 1.0898301629440894,
"count": 37
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012519621625775799,
"min": 0.00038778672072526673,
"max": 0.014062284119445545,
"count": 37
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.17527470276086118,
"min": 0.0050412273694284675,
"max": 0.20037357302984066,
"count": 37
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.0001904392293774119,
"min": 0.0001904392293774119,
"max": 0.00029838354339596195,
"count": 37
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0026661492112837666,
"min": 0.0020691136102954665,
"max": 0.003759513146829,
"count": 37
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.16347973095238094,
"min": 0.16347973095238094,
"max": 0.19946118095238097,
"count": 37
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.288716233333333,
"min": 1.3897045333333333,
"max": 2.752557733333333,
"count": 37
},
"Pyramids.Policy.Beta.mean": {
"value": 0.006351625122142857,
"min": 0.006351625122142857,
"max": 0.009946171977142856,
"count": 37
},
"Pyramids.Policy.Beta.sum": {
"value": 0.08892275171,
"min": 0.06897148288,
"max": 0.1253317829,
"count": 37
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.006684461142867804,
"min": 0.006684461142867804,
"max": 0.36375197768211365,
"count": 37
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.09358245879411697,
"min": 0.09358245879411697,
"max": 2.5462639331817627,
"count": 37
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 329.22093023255815,
"min": 329.22093023255815,
"max": 999.0,
"count": 37
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28313.0,
"min": 15984.0,
"max": 32598.0,
"count": 37
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6009906757363053,
"min": -1.0000000521540642,
"max": 1.6009906757363053,
"count": 37
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 137.68519811332226,
"min": -32.000001668930054,
"max": 137.68519811332226,
"count": 37
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6009906757363053,
"min": -1.0000000521540642,
"max": 1.6009906757363053,
"count": 37
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 137.68519811332226,
"min": -32.000001668930054,
"max": 137.68519811332226,
"count": 37
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02284779747271894,
"min": 0.02284779747271894,
"max": 7.149970472790301,
"count": 37
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.964910582653829,
"min": 1.964910582653829,
"max": 114.39952756464481,
"count": 37
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 37
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 37
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679612196",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679614488"
},
"total": 2291.9120064339995,
"count": 1,
"self": 0.5707430259999455,
"children": {
"run_training.setup": {
"total": 0.1858476370000517,
"count": 1,
"self": 0.1858476370000517
},
"TrainerController.start_learning": {
"total": 2291.1554157709998,
"count": 1,
"self": 1.4363098690323568,
"children": {
"TrainerController._reset_env": {
"total": 9.442500624999866,
"count": 1,
"self": 9.442500624999866
},
"TrainerController.advance": {
"total": 2279.9776958459674,
"count": 71988,
"self": 1.5922970449742024,
"children": {
"env_step": {
"total": 1583.661105826942,
"count": 71988,
"self": 1462.2204458579392,
"children": {
"SubprocessEnvManager._take_step": {
"total": 120.52798532802717,
"count": 71988,
"self": 5.212469179051368,
"children": {
"TorchPolicy.evaluate": {
"total": 115.3155161489758,
"count": 70745,
"self": 115.3155161489758
}
}
},
"workers": {
"total": 0.9126746409756379,
"count": 71987,
"self": 0.0,
"children": {
"worker_root": {
"total": 2286.1412903539226,
"count": 71987,
"is_parallel": true,
"self": 952.1478324109278,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004751907999889227,
"count": 1,
"is_parallel": true,
"self": 0.0033636399998613342,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013882680000278924,
"count": 8,
"is_parallel": true,
"self": 0.0013882680000278924
}
}
},
"UnityEnvironment.step": {
"total": 0.047330315999943195,
"count": 1,
"is_parallel": true,
"self": 0.0005455760001495946,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004437469999629684,
"count": 1,
"is_parallel": true,
"self": 0.0004437469999629684
},
"communicator.exchange": {
"total": 0.044805161999875054,
"count": 1,
"is_parallel": true,
"self": 0.044805161999875054
},
"steps_from_proto": {
"total": 0.0015358309999555786,
"count": 1,
"is_parallel": true,
"self": 0.00033245999998143816,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012033709999741404,
"count": 8,
"is_parallel": true,
"self": 0.0012033709999741404
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1333.9934579429948,
"count": 71986,
"is_parallel": true,
"self": 35.36945377094594,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.69136506301743,
"count": 71986,
"is_parallel": true,
"self": 25.69136506301743
},
"communicator.exchange": {
"total": 1171.6244269460249,
"count": 71986,
"is_parallel": true,
"self": 1171.6244269460249
},
"steps_from_proto": {
"total": 101.3082121630066,
"count": 71986,
"is_parallel": true,
"self": 21.470143655967604,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.838068507039,
"count": 575888,
"is_parallel": true,
"self": 79.838068507039
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 694.7242929740512,
"count": 71987,
"self": 2.852848954955789,
"children": {
"process_trajectory": {
"total": 129.29073910309467,
"count": 71987,
"self": 128.98020645009478,
"children": {
"RLTrainer._checkpoint": {
"total": 0.310532652999882,
"count": 2,
"self": 0.310532652999882
}
}
},
"_update_policy": {
"total": 562.5807049160007,
"count": 497,
"self": 356.25463191701147,
"children": {
"TorchPPOOptimizer.update": {
"total": 206.32607299898928,
"count": 25809,
"self": 206.32607299898928
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4199999895936344e-06,
"count": 1,
"self": 1.4199999895936344e-06
},
"TrainerController._save_models": {
"total": 0.2989080110000941,
"count": 1,
"self": 0.0018941320004159934,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2970138789996781,
"count": 1,
"self": 0.2970138789996781
}
}
}
}
}
}
}