{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,27]],"date-time":"2026-03-27T16:12:34Z","timestamp":1774627954198,"version":"3.50.1"},"reference-count":20,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,8,17]],"date-time":"2021-08-17T00:00:00Z","timestamp":1629158400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,8,17]],"date-time":"2021-08-17T00:00:00Z","timestamp":1629158400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,8,17]]},"DOI":"10.1109\/cog52621.2021.9619053","type":"proceedings-article","created":{"date-parts":[[2021,12,7]],"date-time":"2021-12-07T20:53:06Z","timestamp":1638910386000},"page":"1-8","source":"Crossref","is-referenced-by-count":36,"title":["Adversarial Reinforcement Learning for Procedural Content Generation"],"prefix":"10.1109","author":[{"given":"Linus","family":"Gisslen","sequence":"first","affiliation":[{"name":"SEED - Electronic Arts (EA)"}]},{"given":"Andy","family":"Eakins","sequence":"additional","affiliation":[{"name":"Frostbite - Electronic Arts (EA)"}]},{"given":"Camilo","family":"Gordillo","sequence":"additional","affiliation":[{"name":"SEED - Electronic Arts (EA)"}]},{"given":"Joakim","family":"Bergdahl","sequence":"additional","affiliation":[{"name":"SEED - Electronic Arts (EA)"}]},{"given":"Konrad","family":"Tollmar","sequence":"additional","affiliation":[{"name":"SEED - Electronic Arts (EA)"}]}],"member":"263","reference":[{"key":"ref10","article-title":"Pcgrl: Procedural content generation via reinforcement learning","author":"khalifa","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref11","article-title":"Fully differentiable procedural content generation through generative playing networks","author":"bontrager","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref12","article-title":"Paired open-ended trailblazer (poet): Endlessly generating increasingly complex and diverse learning environments and their solutions","author":"wang","year":"2019","journal-title":"ArXiv Preprint"},{"key":"ref13","first-page":"835","article-title":"Teacher algorithms for curriculum learning of deep rl in continuously parameterized environments","author":"portelas","year":"0","journal-title":"Conference on Robot Learning"},{"key":"ref14","article-title":"Adversarial reinforcement learning","author":"uther","year":"1997","journal-title":"Carnegie Mellon University Unpublished Tech Rep"},{"key":"ref15","article-title":"Robust adversarial reinforcement learning","author":"pinto","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref16","article-title":"Reinforcement learning with unsupervised auxiliary tasks","author":"jaderberg","year":"2016","journal-title":"ArXiv Preprint"},{"key":"ref17","article-title":"Illuminating generalization in deep reinforcement learning through procedural level generation","author":"justesen","year":"2018","journal-title":"ArXiv Preprint"},{"key":"ref18","article-title":"Generative adversarial networks","author":"goodfellow","year":"2014","journal-title":"ArXiv Preprint"},{"key":"ref19","article-title":"Unity: A general platform for intelligent agents","author":"juliani","year":"2018","journal-title":"ArXiv Preprint"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/s40869-017-0043-6"},{"key":"ref3","article-title":"Assessing generalization in deep reinforcement learning","author":"packer","year":"2018","journal-title":"ArXiv Preprint"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2018.8490398"},{"key":"ref5","doi-asserted-by":"crossref","first-page":"66","DOI":"10.1609\/aiide.v16i1.7409","article-title":"Using deep convolutional neural networks to detect rendered glitches in video games","volume":"16","author":"ling","year":"0","journal-title":"Proceedings of the AAAI Conference on Artificial Intelligence and Interactive Digital Entertainment"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CoG47356.2020.9231552"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ASE.2019.00077"},{"key":"ref2","article-title":"A survey of deep reinforcement learning in video games","author":"shao","year":"2019","journal-title":"ArXiv Preprint"},{"key":"ref1","article-title":"Dota 2 with large scale deep reinforcement learning","author":"berner","year":"2019","journal-title":"ArXiv Preprint"},{"key":"ref9","first-page":"1","article-title":"Increasing generality in machine learning through procedural content generation","author":"risi","year":"2020","journal-title":"Nature Mach Intell"},{"key":"ref20","first-page":"310","article-title":"A generalized reinforcement-learning model: Convergence and applications","volume":"96","author":"littman","year":"1996","journal-title":"ICML"}],"event":{"name":"2021 IEEE Conference on Games (CoG)","location":"Copenhagen, Denmark","start":{"date-parts":[[2021,8,17]]},"end":{"date-parts":[[2021,8,20]]}},"container-title":["2021 IEEE Conference on Games (CoG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9618888\/9618891\/09619053.pdf?arnumber=9619053","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,1,17]],"date-time":"2023-01-17T15:43:16Z","timestamp":1673970196000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9619053\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,8,17]]},"references-count":20,"URL":"https:\/\/doi.org\/10.1109\/cog52621.2021.9619053","relation":{},"subject":[],"published":{"date-parts":[[2021,8,17]]}}}