{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,13]],"date-time":"2026-04-13T16:17:07Z","timestamp":1776097027638,"version":"3.50.1"},"reference-count":44,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,8,1]],"date-time":"2020-08-01T00:00:00Z","timestamp":1596240000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,8,1]],"date-time":"2020-08-01T00:00:00Z","timestamp":1596240000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,8,1]],"date-time":"2020-08-01T00:00:00Z","timestamp":1596240000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,8]]},"DOI":"10.1109\/cog47356.2020.9231687","type":"proceedings-article","created":{"date-parts":[[2020,10,21]],"date-time":"2020-10-21T14:05:50Z","timestamp":1603289150000},"page":"479-486","source":"Crossref","is-referenced-by-count":84,"title":["Action Space Shaping in Deep Reinforcement Learning"],"prefix":"10.1109","author":[{"given":"Anssi","family":"Kanervisto","sequence":"first","affiliation":[]},{"given":"Christian","family":"Scheller","sequence":"additional","affiliation":[]},{"given":"Ville","family":"Hautamaki","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","article-title":"Stable baselines","author":"hill","year":"2018"},{"key":"ref38","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017"},{"key":"ref33","article-title":"Alphastar: Mastering the real-time strategy game starcraft ii","author":"vinyals","year":"2019"},{"key":"ref32","article-title":"Deep reinforcement learning with relational inductive biases","author":"zambaldi","year":"2019","journal-title":"ICLRE"},{"key":"ref31","article-title":"Training agent for first-person shooter game with actor-critic curriculum learning","author":"wu","year":"2017","journal-title":"ICLRE"},{"key":"ref30","article-title":"Learning to act by predicting the future","author":"dosovitskiy","year":"2017","journal-title":"ICLRE"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP.2018.8451491"},{"key":"ref36","article-title":"Impala: Scalable distributed deep-rl with importance weighted actor-learner architectures","author":"espeholt","year":"2018","journal-title":"ICML"},{"key":"ref35","article-title":"Deepmind lab","author":"beattie","year":"2016"},{"key":"ref34","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"2016","journal-title":"ICML"},{"key":"ref10","article-title":"The minerl competition on sample-efficient reinforcement learning using human priors: A retrospective","author":"milani","year":"2020"},{"key":"ref40","article-title":"RLlib: Abstractions for distributed reinforcement learning","author":"liang","year":"2018","journal-title":"ICML"},{"key":"ref11","article-title":"Policy invariance under reward transformations: Theory and application to reward shaping","author":"ng","year":"1999","journal-title":"ICML"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2018.8490398"},{"key":"ref13","article-title":"The minerl competition on sample efficient reinforcement learning using human priors","author":"guss","year":"2019"},{"key":"ref14","article-title":"Beating the world\u2019s best at super smash bros. with deep reinforcement learning","author":"firoiu","year":"2017"},{"key":"ref15","article-title":"Reinforcement learning from imperfect demonstrations","author":"gao","year":"2018","journal-title":"ICML"},{"key":"ref16","article-title":"Issues in using function approximation for reinforcement learning","author":"thrun","year":"1993","journal-title":"Proceedings of the 1993 Connectionist Models Summer School"},{"key":"ref17","article-title":"Learn what not to learn: Action elimination with deep reinforcement learning","author":"zahavy","year":"2018","journal-title":"NIPS"},{"key":"ref18","article-title":"Deep reinforcement learning in large discrete action spaces","author":"dulac-arnold","year":"2015"},{"key":"ref19","doi-asserted-by":"crossref","DOI":"10.1609\/aaai.v32i1.11798","article-title":"Action branching architectures for deep reinforcement learning","author":"tavakoli","year":"2018","journal-title":"AAAI"},{"key":"ref28","article-title":"Sample efficient reinforcement learning through learning from demonstrations in minecraft","author":"scheller","year":"2020"},{"key":"ref4","article-title":"Human-level performance in first-person multiplayer games with population-based deep reinforcement learning","author":"jaderberg","year":"2018"},{"key":"ref27","article-title":"Hierarchical deep q-network from imperfect demonstrations in minecraft","author":"skrynnik","year":"2019"},{"key":"ref3","article-title":"Dota 2 with large scale deep reinforcement learning","year":"2019"},{"key":"ref6","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref29","article-title":"Competing in the obstacle tower challenge","author":"nichol","year":"2019"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TG.2018.2877047"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2016.7860433"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.6144"},{"key":"ref2","first-page":"1","article-title":"Grandmaster level in starcraft ii using multi-agent reinforcement learning","author":"vinyals","year":"2019","journal-title":"Nature"},{"key":"ref9","article-title":"The malmo platform for artificial intelligence experimentation","author":"johnson","year":"2016","journal-title":"IJCAI"},{"key":"ref1","author":"sutton","year":"2018","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref20","article-title":"Continuous deep q-learning with model-based acceleration","author":"gu","year":"2016","journal-title":"ICML"},{"key":"ref22","article-title":"Discrete and continuous action representation for practical rl in video games","author":"delalleau","year":"2019","journal-title":"Proc AAAI Workshop Reinforcement Learn Games"},{"key":"ref21","article-title":"Starcraft ii: A new challenge for reinforcement learning","author":"vinyals","year":"2017"},{"key":"ref42","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2019.8847955"},{"key":"ref41","article-title":"Bench-marking deep reinforcement learning for continuous control","author":"duan","year":"2016","journal-title":"ICML"},{"key":"ref23","article-title":"Openai gym","author":"brockman","year":"2016"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/373"},{"key":"ref26","article-title":"Improving stochastic policy gradients in continuous control with deep reinforcement learning using the beta distribution","author":"chou","year":"2017","journal-title":"ICML"},{"key":"ref43","article-title":"Rl baselines zoo","author":"raffin","year":"2018"},{"key":"ref25","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","author":"haarnoja","year":"2018","journal-title":"ICML"}],"event":{"name":"2020 IEEE Conference on Games (CoG)","location":"Osaka, Japan","start":{"date-parts":[[2020,8,24]]},"end":{"date-parts":[[2020,8,27]]}},"container-title":["2020 IEEE Conference on Games (CoG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9222389\/9231525\/09231687.pdf?arnumber=9231687","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,11,24]],"date-time":"2022-11-24T00:21:53Z","timestamp":1669249313000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9231687\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,8]]},"references-count":44,"URL":"https:\/\/doi.org\/10.1109\/cog47356.2020.9231687","relation":{},"subject":[],"published":{"date-parts":[[2020,8]]}}}