{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,7]],"date-time":"2024-09-07T19:33:35Z","timestamp":1725737615644},"reference-count":24,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,8,17]],"date-time":"2021-08-17T00:00:00Z","timestamp":1629158400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,8,17]],"date-time":"2021-08-17T00:00:00Z","timestamp":1629158400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,8,17]],"date-time":"2021-08-17T00:00:00Z","timestamp":1629158400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,8,17]]},"DOI":"10.1109\/cog52621.2021.9618997","type":"proceedings-article","created":{"date-parts":[[2021,12,7]],"date-time":"2021-12-07T15:53:06Z","timestamp":1638892386000},"page":"01-04","source":"Crossref","is-referenced-by-count":2,"title":["Distilling Reinforcement Learning Tricks for Video Games"],"prefix":"10.1109","author":[{"given":"Anssi","family":"Kanervisto","sequence":"first","affiliation":[]},{"given":"Christian","family":"Scheller","sequence":"additional","affiliation":[]},{"given":"Yanick","family":"Schraner","sequence":"additional","affiliation":[]},{"given":"Ville","family":"Hautamaki","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","article-title":"Learning to act by predicting the future","author":"dosovitskiy","year":"2017","journal-title":"ICLRE"},{"journal-title":"Mastering real-time strategy games with deep reinforcement learning Mere mortal edition","year":"2021","author":"winter","key":"ref11"},{"journal-title":"The football project","year":"2020","author":"rychlicki","key":"ref12"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/482"},{"key":"ref14","article-title":"Align-rudder: Learning from few demonstrations by reward redistribution","author":"patil","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref15","article-title":"Forgetful experience replay in hierarchical reinforcement learning from demonstrations","author":"skrynnik","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref16","article-title":"Augmenting automated game testing with deep reinforcement learning","author":"bergdahl","year":"2020","journal-title":"Cog"},{"key":"ref17","article-title":"Action space shaping in deep reinforcement learning","author":"kanervisto","year":"0","journal-title":"CogSci 2020"},{"key":"ref18","article-title":"Policy invariance under reward transformations: Theory and application to reward shaping","author":"ng","year":"1999","journal-title":"ICML"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1145\/1553374.1553380"},{"journal-title":"Announcing the obstacle tower challenge winners and open source release","year":"2019","author":"juliani","key":"ref4"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TG.2018.2877047"},{"key":"ref6","doi-asserted-by":"crossref","DOI":"10.1609\/aaai.v31i1.10827","article-title":"Playing fps games with deep reinforcement learning","author":"lample","year":"2017","journal-title":"AAAI"},{"key":"ref5","article-title":"Retrospective analysis of the 2019 minerl competition on sample efficient reinforcement learning","author":"milani","year":"0","journal-title":"Proceedings of the NeurIPS 2019 Competition and Demonstration Track"},{"key":"ref8","article-title":"Training agent for first-person shooter game with actor-critic curriculum learning","author":"wu","year":"0","journal-title":"ICLR 2017"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1126\/science.aau6249"},{"journal-title":"Dota 2 with large scale deep reinforcement learning","year":"2019","key":"ref2"},{"journal-title":"Competing in the obstacle tower challenge","year":"2019","author":"nichol","key":"ref9"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-019-1724-z"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2016.7860433"},{"journal-title":"Stable baselines3","year":"2019","author":"raffin","key":"ref21"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5878"},{"journal-title":"The minerl competition on sample efficient reinforcement learning using human priors","year":"2019","author":"guss","key":"ref23"}],"event":{"name":"2021 IEEE Conference on Games (CoG)","start":{"date-parts":[[2021,8,17]]},"location":"Copenhagen, Denmark","end":{"date-parts":[[2021,8,20]]}},"container-title":["2021 IEEE Conference on Games (CoG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9618888\/9618891\/09618997.pdf?arnumber=9618997","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,1,17]],"date-time":"2023-01-17T10:42:48Z","timestamp":1673952168000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9618997\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,8,17]]},"references-count":24,"URL":"https:\/\/doi.org\/10.1109\/cog52621.2021.9618997","relation":{},"subject":[],"published":{"date-parts":[[2021,8,17]]}}}