{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,5]],"date-time":"2025-11-05T11:22:38Z","timestamp":1762341758320},"reference-count":18,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,8,17]],"date-time":"2021-08-17T00:00:00Z","timestamp":1629158400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,8,17]],"date-time":"2021-08-17T00:00:00Z","timestamp":1629158400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,8,17]]},"DOI":"10.1109\/cog52621.2021.9619004","type":"proceedings-article","created":{"date-parts":[[2021,12,7]],"date-time":"2021-12-07T20:53:06Z","timestamp":1638910386000},"page":"01-05","source":"Crossref","is-referenced-by-count":2,"title":["An Approach to Partial Observability in Games: Learning to Both Act and Observe"],"prefix":"10.1109","author":[{"given":"Elizabeth","family":"Gilmour","sequence":"first","affiliation":[{"name":"Naval Center for Applied Research in A.I. U.S. Naval Research Laboratory,Washington D.C.,U.S.A"}]},{"given":"Noah","family":"Plotkin","sequence":"additional","affiliation":[{"name":"Oberlin College,Oberlin,OH,U.S.A"}]},{"given":"Leslie N.","family":"Smith","sequence":"additional","affiliation":[{"name":"Naval Center for Applied Research in A.I. U.S. Naval Research Laboratory,Washington D.C.,U.S.A"}]}],"member":"263","reference":[{"key":"ref1","article-title":"A survey of deep reinforcement learning in video games","author":"Shao","year":"2019","journal-title":"arXiv preprint"},{"key":"ref2","first-page":"507","article-title":"Agent57: Outperforming the Atari human benchmark","volume-title":"International Conference on Machine Learning","author":"Badia","year":"2020"},{"key":"ref3","article-title":"Playing Atari with deep reinforcement learning","author":"Mnih","year":"2013","journal-title":"arXiv preprint"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-019-1724-z"},{"key":"ref6","article-title":"Starcraft II: A new challenge for reinforcement learning","author":"Vinyals","year":"2017","journal-title":"arXiv preprint"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00135"},{"key":"ref8","article-title":"Learning to predict where to look in interactive environments using deep recurrent Q-learning","author":"Mousavi","year":"2016","journal-title":"arXiv preprint"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3377930.3389847"},{"key":"ref10","article-title":"Uncertainty maximization in partially observable domains: A cognitive perspective","author":"Ramicic","year":"2021","journal-title":"arXiv preprint"},{"key":"ref11","article-title":"Learning invariant representations for reinforcement learning without reconstruction","author":"Zhang","year":"2020","journal-title":"arXiv preprint"},{"key":"ref12","first-page":"2204","article-title":"Recurrent models of visual attention","author":"Mnih","year":"2014","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref13","article-title":"Transformers in vision: A survey","author":"Khan","year":"2021","journal-title":"arXiv preprint"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW.2019.00522"},{"key":"ref15","article-title":"Towards interpretable reinforcement learning using attention augmented agents","author":"Mott","year":"2019","journal-title":"arXiv preprint"},{"key":"ref16","article-title":"Exploratory not explanatory: Counterfactual analysis of saliency maps for deep reinforcement learning","author":"Atrey","year":"2019","journal-title":"arXiv preprint"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11798"},{"key":"ref18","first-page":"1","article-title":"Curriculum learning for reinforcement learning domains: A framework and survey","author":"Narvekar","year":"2020","journal-title":"Journal of Machine Learning Research"}],"event":{"name":"2021 IEEE Conference on Games (CoG)","start":{"date-parts":[[2021,8,17]]},"location":"Copenhagen, Denmark","end":{"date-parts":[[2021,8,20]]}},"container-title":["2021 IEEE Conference on Games (CoG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9618888\/9618891\/09619004.pdf?arnumber=9619004","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,28]],"date-time":"2024-09-28T05:20:26Z","timestamp":1727500826000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9619004\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,8,17]]},"references-count":18,"URL":"https:\/\/doi.org\/10.1109\/cog52621.2021.9619004","relation":{},"subject":[],"published":{"date-parts":[[2021,8,17]]}}}