ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4002948999404907,
"min": 1.400290846824646,
"max": 1.4044567346572876,
"count": 20
},
"Huggy.Policy.Entropy.sum": {
"value": 70707.890625,
"min": 68126.109375,
"max": 71574.140625,
"count": 20
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 92.73170731707317,
"min": 66.15725806451613,
"max": 92.73170731707317,
"count": 20
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49426.0,
"min": 49221.0,
"max": 49556.0,
"count": 20
},
"Huggy.Step.mean": {
"value": 2999908.0,
"min": 2049981.0,
"max": 2999908.0,
"count": 20
},
"Huggy.Step.sum": {
"value": 2999908.0,
"min": 2049981.0,
"max": 2999908.0,
"count": 20
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4432694911956787,
"min": 2.442089557647705,
"max": 2.5468037128448486,
"count": 20
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1302.2626953125,
"min": 1302.2626953125,
"max": 1863.03515625,
"count": 20
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7131219303853964,
"min": 3.7131219303853964,
"max": 4.026137485445201,
"count": 20
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1979.0939888954163,
"min": 1979.0939888954163,
"max": 2931.028089404106,
"count": 20
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7131219303853964,
"min": 3.7131219303853964,
"max": 4.026137485445201,
"count": 20
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1979.0939888954163,
"min": 1979.0939888954163,
"max": 2931.028089404106,
"count": 20
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016497817036967413,
"min": 0.014403317138850171,
"max": 0.021526213961381775,
"count": 20
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.032995634073934826,
"min": 0.03035465927775173,
"max": 0.056156814580026554,
"count": 20
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.047286525989572205,
"min": 0.047286525989572205,
"max": 0.0675045739238461,
"count": 20
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.09457305197914441,
"min": 0.09457305197914441,
"max": 0.18123736369113128,
"count": 20
},
"Huggy.Policy.LearningRate.mean": {
"value": 2.3148492284166755e-06,
"min": 2.3148492284166755e-06,
"max": 9.691496769503332e-05,
"count": 20
},
"Huggy.Policy.LearningRate.sum": {
"value": 4.629698456833351e-06,
"min": 4.629698456833351e-06,
"max": 0.00026304131231963336,
"count": 20
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10077158333333334,
"min": 0.10077158333333334,
"max": 0.1323049666666667,
"count": 20
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.2015431666666667,
"min": 0.2015431666666667,
"max": 0.3876803666666667,
"count": 20
},
"Huggy.Policy.Beta.mean": {
"value": 4.850200833333351e-05,
"min": 4.850200833333351e-05,
"max": 0.0016220178366666666,
"count": 20
},
"Huggy.Policy.Beta.sum": {
"value": 9.700401666666702e-05,
"min": 9.700401666666702e-05,
"max": 0.004405250296666668,
"count": 20
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1707229684",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1707230903"
},
"total": 1219.2884672870005,
"count": 1,
"self": 0.4370836470006907,
"children": {
"run_training.setup": {
"total": 0.04692796699964674,
"count": 1,
"self": 0.04692796699964674
},
"TrainerController.start_learning": {
"total": 1218.8044556730001,
"count": 1,
"self": 2.343180128024869,
"children": {
"TrainerController._reset_env": {
"total": 2.1479978630004553,
"count": 1,
"self": 2.1479978630004553
},
"TrainerController.advance": {
"total": 1214.1848600489748,
"count": 118128,
"self": 2.452056089875441,
"children": {
"env_step": {
"total": 974.6812306079783,
"count": 118128,
"self": 807.4357161978896,
"children": {
"SubprocessEnvManager._take_step": {
"total": 165.77706546212357,
"count": 118128,
"self": 8.387665497116359,
"children": {
"TorchPolicy.evaluate": {
"total": 157.38939996500721,
"count": 111823,
"self": 157.38939996500721
}
}
},
"workers": {
"total": 1.4684489479650438,
"count": 118128,
"self": 0.0,
"children": {
"worker_root": {
"total": 1214.8518558820324,
"count": 118128,
"is_parallel": true,
"self": 556.5244112889222,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0011551730003702687,
"count": 1,
"is_parallel": true,
"self": 0.00035466300050757127,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008005099998626974,
"count": 2,
"is_parallel": true,
"self": 0.0008005099998626974
}
}
},
"UnityEnvironment.step": {
"total": 0.02979850100018666,
"count": 1,
"is_parallel": true,
"self": 0.0003212329993402818,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020562200006679632,
"count": 1,
"is_parallel": true,
"self": 0.00020562200006679632
},
"communicator.exchange": {
"total": 0.028506298000138486,
"count": 1,
"is_parallel": true,
"self": 0.028506298000138486
},
"steps_from_proto": {
"total": 0.0007653480006410973,
"count": 1,
"is_parallel": true,
"self": 0.00022463200002675876,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005407160006143386,
"count": 2,
"is_parallel": true,
"self": 0.0005407160006143386
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 658.3274445931102,
"count": 118127,
"is_parallel": true,
"self": 20.83874480905888,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 43.2128352530126,
"count": 118127,
"is_parallel": true,
"self": 43.2128352530126
},
"communicator.exchange": {
"total": 547.4930454870992,
"count": 118127,
"is_parallel": true,
"self": 547.4930454870992
},
"steps_from_proto": {
"total": 46.782819043939526,
"count": 118127,
"is_parallel": true,
"self": 17.487023304808645,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.29579573913088,
"count": 236254,
"is_parallel": true,
"self": 29.29579573913088
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 237.05157335112108,
"count": 118128,
"self": 3.40058024413338,
"children": {
"process_trajectory": {
"total": 86.32439864298885,
"count": 118128,
"self": 85.67243265498837,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6519659880004838,
"count": 5,
"self": 0.6519659880004838
}
}
},
"_update_policy": {
"total": 147.32659446399884,
"count": 48,
"self": 116.61226426998746,
"children": {
"TorchPPOOptimizer.update": {
"total": 30.71433019401138,
"count": 1440,
"self": 30.71433019401138
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5639998309779912e-06,
"count": 1,
"self": 1.5639998309779912e-06
},
"TrainerController._save_models": {
"total": 0.12841606900019542,
"count": 1,
"self": 0.0030520170003001112,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1253640519998953,
"count": 1,
"self": 0.1253640519998953
}
}
}
}
}
}
}