{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,18]],"date-time":"2026-03-18T13:25:58Z","timestamp":1773840358257,"version":"3.50.1"},"reference-count":35,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2023,6,1]],"date-time":"2023-06-01T00:00:00Z","timestamp":1685577600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"name":"Research Institute of Trustworthy Autonomous Systems"},{"name":"Guangdong Provincial Key Laboratory","award":["2020B121201001"],"award-info":[{"award-number":["2020B121201001"]}]},{"name":"Program for Guangdong Introducing Innovative and Enterpreneurial Teams","award":["2017ZT07X386"],"award-info":[{"award-number":["2017ZT07X386"]}]},{"name":"Shenzhen Science and Technology Program","award":["KQTD2016112514355531"],"award-info":[{"award-number":["KQTD2016112514355531"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61906083"],"award-info":[{"award-number":["61906083"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. 
Games"],"published-print":{"date-parts":[[2023,6]]},"DOI":"10.1109\/tg.2022.3164242","type":"journal-article","created":{"date-parts":[[2022,4,1]],"date-time":"2022-04-01T20:02:01Z","timestamp":1648843321000},"page":"202-216","source":"Crossref","is-referenced-by-count":9,"title":["Reinforcement Learning With Dual-Observation for General Video Game Playing"],"prefix":"10.1109","volume":"15","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8343-4186","authenticated-orcid":false,"given":"Chengpeng","family":"Hu","sequence":"first","affiliation":[{"name":"Research Institute of Trustworthy Autonomous System (RITAS), Southern University of Science and Technology, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7640-3960","authenticated-orcid":false,"given":"Ziqi","family":"Wang","sequence":"additional","affiliation":[{"name":"Research Institute of Trustworthy Autonomous System (RITAS), Southern University of Science and Technology, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7673-3943","authenticated-orcid":false,"given":"Tianye","family":"Shu","sequence":"additional","affiliation":[{"name":"Research Institute of Trustworthy Autonomous System (RITAS), Southern University of Science and Technology, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4881-8701","authenticated-orcid":false,"given":"Hao","family":"Tong","sequence":"additional","affiliation":[{"name":"Research Institute of Trustworthy Autonomous System (RITAS), Southern University of Science and Technology, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3128-4598","authenticated-orcid":false,"given":"Julian","family":"Togelius","sequence":"additional","affiliation":[{"name":"New York University, New York, NY, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8837-4442","authenticated-orcid":false,"given":"Xin","family":"Yao","sequence":"additional","affiliation":[{"name":"Research Institute of Trustworthy Autonomous System (RITAS), Southern University of 
Science and Technology, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7047-8454","authenticated-orcid":false,"given":"Jialin","family":"Liu","sequence":"additional","affiliation":[{"name":"Guangdong Provincial Key Laboratory of Brain-Inspired Intelligent Computation, Department of Computer Science and Engineering, Southern University of Science and Technology, Shenzhen, China"}]}],"member":"263","reference":[{"key":"ref13","first-page":"19884","article-title":"Reinforcement learning with augmented data","volume":"33","author":"laskin","year":"2020","journal-title":"Adv Neural Inf Process Syst"},{"key":"ref35","first-page":"1437","article-title":"A comprehensive survey on safe reinforcement learning","volume":"16","author":"garc\u00eda","year":"2015","journal-title":"J Mach Learn Res"},{"key":"ref12","first-page":"1282","article-title":"Quantifying generalization in reinforcement learning","volume":"97","author":"cobbe","year":"0","journal-title":"Proc 36th Int Conf Mach Learn"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1145\/2463372.2463413"},{"key":"ref15","first-page":"2048","article-title":"Leveraging procedural generation to benchmark reinforcement learning","author":"cobbe","year":"0","journal-title":"Proc 37th Int Conf Mach Learn"},{"key":"ref14","article-title":"Illuminating generalization in deep reinforcement learning through procedural level generation","author":"justesen","year":"0","journal-title":"Deep RL Workshop at Neurips"},{"key":"ref31","first-page":"1","article-title":"Sample efficient actor-critic with experience replay","author":"wang","year":"2017","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TCIAIG.2015.2402393"},{"key":"ref11","first-page":"1","article-title":"Image augmentation is all you need: Regularizing deep reinforcement learning from pixels","author":"kostrikov","year":"2021","journal-title":"Proc Int Conf Learn 
Representations"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-02122-0"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CoG52621.2021.9619141"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/373"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1038\/nature24270"},{"key":"ref1","doi-asserted-by":"crossref","first-page":"484","DOI":"10.1038\/nature16961","article-title":"Mastering the game of go with deep neural networks and tree search","volume":"529","author":"silver","year":"2016","journal-title":"Nature"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TG.2019.2901021"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-02122-0"},{"key":"ref19","first-page":"1","article-title":"An agent-based learning approach for finding and exploiting heuristics in unknown environments","volume":"2052","author":"apeldoorn","year":"2017","journal-title":"Proc 13th Int Symp Commonsense Reasoning"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CEC.2017.7969556"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TCIAIG.2014.2352795"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2013.6633610"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-42716-4"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TCIAIG.2011.2148116"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2018.8490411"},{"key":"ref22","first-page":"1","article-title":"Stable-Baselines3: Reliable Reinforcement Learning Implementations","volume":"22","author":"raffin","year":"2021","journal-title":"J Mach Learn Res"},{"key":"ref21","first-page":"57","article-title":"Rotation, translation, and cropping for zero-shot generalization","author":"ye","year":"0","journal-title":"Proc IEEE Conf 
Games"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1038\/s42256-020-0208-z"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TG.2018.2846639"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1007\/s00521-020-05383-8"},{"key":"ref8","first-page":"1060","article-title":"Discovering Reinforcement Learning Algorithms","volume":"33","author":"oh","year":"0","journal-title":"Adv Neural Inf Process Syst"},{"key":"ref7","article-title":"Assessing generalization in deep reinforcement learning","author":"packer","year":"2018"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10014"},{"key":"ref4","first-page":"1","article-title":"Model-based reinforcement learning for Atari","author":"kaiser","year":"2020","journal-title":"Proc 8th Int Conf Learn Representations"},{"key":"ref3","doi-asserted-by":"crossref","first-page":"604","DOI":"10.1038\/s41586-020-03051-4","article-title":"Mastering Atari, go, chess and shogi by planning with a learned model","volume":"588","author":"schrittwieser","year":"2020","journal-title":"Nature"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2018.8490422"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2016.7860433"}],"container-title":["IEEE Transactions on 
Games"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7782673\/10153940\/09748033.pdf?arnumber=9748033","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,7,3]],"date-time":"2023-07-03T18:34:41Z","timestamp":1688409281000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9748033\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,6]]},"references-count":35,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/tg.2022.3164242","relation":{},"ISSN":["2475-1502","2475-1510"],"issn-type":[{"value":"2475-1502","type":"print"},{"value":"2475-1510","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,6]]}}}