{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,16]],"date-time":"2026-02-16T15:39:02Z","timestamp":1771256342720,"version":"3.50.1"},"reference-count":23,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,8,17]],"date-time":"2021-08-17T00:00:00Z","timestamp":1629158400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,8,17]],"date-time":"2021-08-17T00:00:00Z","timestamp":1629158400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,8,17]],"date-time":"2021-08-17T00:00:00Z","timestamp":1629158400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,8,17]]},"DOI":"10.1109\/cog52621.2021.9619068","type":"proceedings-article","created":{"date-parts":[[2021,12,7]],"date-time":"2021-12-07T20:53:06Z","timestamp":1638910386000},"page":"1-8","source":"Crossref","is-referenced-by-count":1,"title":["Hierarchical Advantage for Reinforcement Learning in Parameterized Action Space"],"prefix":"10.1109","author":[{"given":"Zhejie","family":"Hu","sequence":"first","affiliation":[]},{"given":"Tomoyuki","family":"Kaneko","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","article-title":"Deep reinforcement learning in parameterized action space","author":"hausknecht","year":"0","journal-title":"International Conference on Learning Representations(ICLR)"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-74024-7_7"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/267658.267738"},{"key":"ref13","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015","journal-title":"ArXiv Preprint"},{"key":"ref14","article-title":"Reinforcement learning with parameterized actions","volume":"30","author":"masson","year":"0","journal-title":"Proceedings of the AAAI Conference on Artificial Intelligence"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref16","article-title":"High-dimensional continuous control using generalized advantage estimation","author":"schulman","year":"0","journal-title":"Proceedings of the International Conference on Learning Representations (ICLR)"},{"key":"ref17","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref18","doi-asserted-by":"crossref","first-page":"484","DOI":"10.1038\/nature16961","article-title":"Mastering the game of go with deep neural networks and tree search","volume":"529","author":"silver","year":"2016","journal-title":"Nature"},{"key":"ref19","author":"sutton","year":"2018","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref4","article-title":"Discrete and continuous action representation for practical rl in video","author":"delalleau","year":"0","journal-title":"Association for the Advancement of Artificial Intelligence Workshop on Reinforcement Learning in Games"},{"key":"ref3","article-title":"Multi-pass q-networks for deep reinforcement learning with parameterised action spaces","author":"bester","year":"2019","journal-title":"ArXiv Preprint"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/316"},{"key":"ref5","author":"dhariwal","year":"2017","journal-title":"OpenAI Baselines"},{"key":"ref8","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","author":"haarnoja","year":"0","journal-title":"International Conference on Machine Learning"},{"key":"ref7","first-page":"1352","article-title":"Reinforcement learning with deep energy-based policies","author":"haarnoja","year":"0","journal-title":"International Conference on Machine Learning"},{"key":"ref2","article-title":"What matters in on-policy reinforcement learning? a large-scale empirical study","author":"andrychowicz","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref1","first-page":"528","volume":"8371","author":"akiyama","year":"2014","journal-title":"Helios base An open source package for the robocup soccer 2d simulation"},{"key":"ref9","article-title":"Half field offense: An environment for multiagent learning and ad hoc teamwork","author":"hausknecht","year":"0","journal-title":"AAMAS Adaptive Learning Agents (ALA) Workshop sn"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.2307\/2276774"},{"key":"ref21","article-title":"Hierarchical approaches for reinforcement learning in parameterized action space","author":"wei","year":"2018","journal-title":"Association for the Advancement of Artificial Intelligence"},{"key":"ref23","article-title":"Parametrized deep q-networks learning: Reinforcement learning with discrete-continuous hybrid action space","author":"xiong","year":"2018","journal-title":"ArXiv Preprint"}],"event":{"name":"2021 IEEE Conference on Games (CoG)","location":"Copenhagen, Denmark","start":{"date-parts":[[2021,8,17]]},"end":{"date-parts":[[2021,8,20]]}},"container-title":["2021 IEEE Conference on Games (CoG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9618888\/9618891\/09619068.pdf?arnumber=9619068","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T16:53:34Z","timestamp":1652201614000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9619068\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,8,17]]},"references-count":23,"URL":"https:\/\/doi.org\/10.1109\/cog52621.2021.9619068","relation":{},"subject":[],"published":{"date-parts":[[2021,8,17]]}}}