{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.04420804977417,
"min": 1.0356214046478271,
"max": 2.8895699977874756,
"count": 100
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 11176.158203125,
"min": 9250.1708984375,
"max": 38364.8203125,
"count": 100
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 100
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 13134.0,
"count": 100
},
"SnowballTarget.Step.mean": {
"value": 999800.0,
"min": 9800.0,
"max": 999800.0,
"count": 100
},
"SnowballTarget.Step.sum": {
"value": 999800.0,
"min": 9800.0,
"max": 999800.0,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.088557243347168,
"min": 0.036347873508930206,
"max": 14.088557243347168,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 704.4278564453125,
"min": 1.7810457944869995,
"max": 704.4278564453125,
"count": 100
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.38,
"min": 2.2244897959183674,
"max": 27.82,
"count": 100
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1369.0,
"min": 109.0,
"max": 1391.0,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.38,
"min": 2.2244897959183674,
"max": 27.82,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1369.0,
"min": 109.0,
"max": 1391.0,
"count": 100
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.047655855384672416,
"min": 0.0408383085259451,
"max": 0.06143570404931191,
"count": 100
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.047655855384672416,
"min": 0.0408383085259451,
"max": 0.10283301106263773,
"count": 100
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19121214951954635,
"min": 0.08641690877722759,
"max": 0.30546318856524485,
"count": 100
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.19121214951954635,
"min": 0.08641690877722759,
"max": 0.5694118307209481,
"count": 100
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.6800994400000145e-06,
"min": 1.6800994400000145e-06,
"max": 0.00029736000088000005,
"count": 100
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.6800994400000145e-06,
"min": 1.6800994400000145e-06,
"max": 0.0005551200149599997,
"count": 100
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10055999999999998,
"min": 0.10055999999999998,
"max": 0.19912000000000005,
"count": 100
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.10055999999999998,
"min": 0.10055999999999998,
"max": 0.38504000000000005,
"count": 100
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.594400000000048e-05,
"min": 6.594400000000048e-05,
"max": 0.009912088000000001,
"count": 100
},
"SnowballTarget.Policy.Beta.sum": {
"value": 6.594400000000048e-05,
"min": 6.594400000000048e-05,
"max": 0.018505496000000003,
"count": 100
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676495914",
"python_version": "3.9.7 (default, Sep 16 2021, 13:09:58) \n[GCC 7.5.0]",
"command_line_arguments": "/home/hit/app/hf39/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1",
"numpy_version": "1.23.5",
"end_time_seconds": "1676497012"
},
"total": 1098.1144125289998,
"count": 1,
"self": 0.2188806049998675,
"children": {
"run_training.setup": {
"total": 0.009018022999953246,
"count": 1,
"self": 0.009018022999953246
},
"TrainerController.start_learning": {
"total": 1097.886513901,
"count": 1,
"self": 1.7878642791042694,
"children": {
"TrainerController._reset_env": {
"total": 0.4278517039997496,
"count": 1,
"self": 0.4278517039997496
},
"TrainerController.advance": {
"total": 1095.5793983678964,
"count": 91022,
"self": 0.8657905448471865,
"children": {
"env_step": {
"total": 1094.7136078230492,
"count": 91022,
"self": 797.2466852172038,
"children": {
"SubprocessEnvManager._take_step": {
"total": 296.61936874482626,
"count": 91022,
"self": 3.3954131148229862,
"children": {
"TorchPolicy.evaluate": {
"total": 293.2239556300033,
"count": 91022,
"self": 33.628078278999055,
"children": {
"TorchPolicy.sample_actions": {
"total": 259.5958773510042,
"count": 91022,
"self": 259.5958773510042
}
}
}
}
},
"workers": {
"total": 0.8475538610191506,
"count": 91022,
"self": 0.0,
"children": {
"worker_root": {
"total": 1095.8596509680046,
"count": 91022,
"is_parallel": true,
"self": 488.3935832669372,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001957545000095706,
"count": 1,
"is_parallel": true,
"self": 0.0005244860003585927,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014330589997371135,
"count": 10,
"is_parallel": true,
"self": 0.0014330589997371135
}
}
},
"UnityEnvironment.step": {
"total": 0.01675234099957379,
"count": 1,
"is_parallel": true,
"self": 0.0003450789995440573,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001992150000660331,
"count": 1,
"is_parallel": true,
"self": 0.0001992150000660331
},
"communicator.exchange": {
"total": 0.015270833000158746,
"count": 1,
"is_parallel": true,
"self": 0.015270833000158746
},
"steps_from_proto": {
"total": 0.0009372139998049533,
"count": 1,
"is_parallel": true,
"self": 0.00019149999980072607,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007457140000042273,
"count": 10,
"is_parallel": true,
"self": 0.0007457140000042273
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 607.4660677010675,
"count": 91021,
"is_parallel": true,
"self": 32.014875892931286,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 14.919763227038857,
"count": 91021,
"is_parallel": true,
"self": 14.919763227038857
},
"communicator.exchange": {
"total": 476.39996817208794,
"count": 91021,
"is_parallel": true,
"self": 476.39996817208794
},
"steps_from_proto": {
"total": 84.13146040900938,
"count": 91021,
"is_parallel": true,
"self": 16.624615560942402,
"children": {
"_process_rank_one_or_two_observation": {
"total": 67.50684484806698,
"count": 910210,
"is_parallel": true,
"self": 67.50684484806698
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0006072729993320536,
"count": 1,
"self": 0.0006072729993320536,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1093.7925792752048,
"count": 611929,
"is_parallel": true,
"self": 9.00737866921827,
"children": {
"process_trajectory": {
"total": 501.4968359249847,
"count": 611929,
"is_parallel": true,
"self": 499.85578665198636,
"children": {
"RLTrainer._checkpoint": {
"total": 1.6410492729983162,
"count": 10,
"is_parallel": true,
"self": 1.6410492729983162
}
}
},
"_update_policy": {
"total": 583.2883646810019,
"count": 113,
"is_parallel": true,
"self": 140.1766088680415,
"children": {
"TorchPPOOptimizer.update": {
"total": 443.1117558129604,
"count": 11526,
"is_parallel": true,
"self": 443.1117558129604
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09079227700021875,
"count": 1,
"self": 0.000821347000055539,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08997093000016321,
"count": 1,
"self": 0.08997093000016321
}
}
}
}
}
}
}