{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3506196141242981,
"min": 0.3506196141242981,
"max": 1.435718059539795,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10597.126953125,
"min": 10597.126953125,
"max": 43553.94140625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989968.0,
"min": 29904.0,
"max": 989968.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989968.0,
"min": 29904.0,
"max": 989968.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5892725586891174,
"min": -0.08595546334981918,
"max": 0.6756123304367065,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 163.81776428222656,
"min": -20.715267181396484,
"max": 194.57635498046875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.030525920912623405,
"min": -0.030525920912623405,
"max": 0.32834094762802124,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -8.4862060546875,
"min": -8.4862060546875,
"max": 77.81680297851562,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06778520575469538,
"min": 0.06664934588479811,
"max": 0.07466580772528515,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9489928805657353,
"min": 0.5589956592803165,
"max": 1.0588430338539183,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01667608187834084,
"min": 0.0010325357794728812,
"max": 0.0175427032673421,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23346514629677176,
"min": 0.013422965133147457,
"max": 0.2631405490101315,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.561747479450004e-06,
"min": 7.561747479450004e-06,
"max": 0.00029521470159509997,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010586446471230006,
"min": 0.00010586446471230006,
"max": 0.0036330592889803,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252055000000002,
"min": 0.10252055000000002,
"max": 0.1984049,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4352877000000002,
"min": 1.4352877000000002,
"max": 2.6110197000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002618029450000002,
"min": 0.0002618029450000002,
"max": 0.00984064951,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003665241230000002,
"min": 0.003665241230000002,
"max": 0.12112086803000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01119464635848999,
"min": 0.01119464635848999,
"max": 0.39911597967147827,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15672504901885986,
"min": 0.15672504901885986,
"max": 3.192927837371826,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 321.3936170212766,
"min": 276.51960784313724,
"max": 996.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30211.0,
"min": 15936.0,
"max": 34214.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6147616862299594,
"min": -0.9288065027325384,
"max": 1.723480379902849,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 151.7875985056162,
"min": -28.79300158470869,
"max": 178.2849980443716,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6147616862299594,
"min": -0.9288065027325384,
"max": 1.723480379902849,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 151.7875985056162,
"min": -28.79300158470869,
"max": 178.2849980443716,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03722004433748612,
"min": 0.034992316024236775,
"max": 7.843625839799643,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.4986841677236953,
"min": 3.4986841677236953,
"max": 125.49801343679428,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1756492948",
"python_version": "3.10.12 (main, Aug 15 2025, 14:32:43) [GCC 11.4.0]",
"command_line_arguments": "/home/sam/.local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1756494600"
},
"total": 1549.8057936100013,
"count": 1,
"self": 0.47737452200453845,
"children": {
"run_training.setup": {
"total": 0.024550932997954078,
"count": 1,
"self": 0.024550932997954078
},
"TrainerController.start_learning": {
"total": 1549.3038681549988,
"count": 1,
"self": 1.09274243634718,
"children": {
"TrainerController._reset_env": {
"total": 3.4976133739983197,
"count": 1,
"self": 3.4976133739983197
},
"TrainerController.advance": {
"total": 1544.6385271596519,
"count": 64075,
"self": 1.1245555171408341,
"children": {
"env_step": {
"total": 1033.4813289601589,
"count": 64075,
"self": 831.2906537351737,
"children": {
"SubprocessEnvManager._take_step": {
"total": 201.47321315782756,
"count": 64075,
"self": 3.511273920732492,
"children": {
"TorchPolicy.evaluate": {
"total": 197.96193923709507,
"count": 62564,
"self": 197.96193923709507
}
}
},
"workers": {
"total": 0.7174620671576122,
"count": 64075,
"self": 0.0,
"children": {
"worker_root": {
"total": 1546.785181969084,
"count": 64075,
"is_parallel": true,
"self": 782.2920621200174,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018416909988445695,
"count": 1,
"is_parallel": true,
"self": 0.0008840340015012771,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009576569973432925,
"count": 8,
"is_parallel": true,
"self": 0.0009576569973432925
}
}
},
"UnityEnvironment.step": {
"total": 0.03194311499828473,
"count": 1,
"is_parallel": true,
"self": 0.00018232400543638505,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001579729978402611,
"count": 1,
"is_parallel": true,
"self": 0.0001579729978402611
},
"communicator.exchange": {
"total": 0.031007185996713815,
"count": 1,
"is_parallel": true,
"self": 0.031007185996713815
},
"steps_from_proto": {
"total": 0.0005956319982942659,
"count": 1,
"is_parallel": true,
"self": 0.0001189479953609407,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00047668400293332525,
"count": 8,
"is_parallel": true,
"self": 0.00047668400293332525
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 764.4931198490667,
"count": 64074,
"is_parallel": true,
"self": 10.974096731555619,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.600947237100627,
"count": 64074,
"is_parallel": true,
"self": 7.600947237100627
},
"communicator.exchange": {
"total": 717.8180234088468,
"count": 64074,
"is_parallel": true,
"self": 717.8180234088468
},
"steps_from_proto": {
"total": 28.100052471563686,
"count": 64074,
"is_parallel": true,
"self": 6.626374352519633,
"children": {
"_process_rank_one_or_two_observation": {
"total": 21.473678119044052,
"count": 512592,
"is_parallel": true,
"self": 21.473678119044052
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 510.03264268235216,
"count": 64075,
"self": 2.034144255801948,
"children": {
"process_trajectory": {
"total": 91.01332291253493,
"count": 64075,
"self": 90.74448516553457,
"children": {
"RLTrainer._checkpoint": {
"total": 0.26883774700036156,
"count": 2,
"self": 0.26883774700036156
}
}
},
"_update_policy": {
"total": 416.9851755140153,
"count": 460,
"self": 180.6812752509759,
"children": {
"TorchPPOOptimizer.update": {
"total": 236.3039002630394,
"count": 22818,
"self": 236.3039002630394
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.520014383364469e-07,
"count": 1,
"self": 5.520014383364469e-07
},
"TrainerController._save_models": {
"total": 0.07498463299998548,
"count": 1,
"self": 0.0018816149968188256,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07310301800316665,
"count": 1,
"self": 0.07310301800316665
}
}
}
}
}
}