{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T01:57:39Z","timestamp":1775181459033,"version":"3.50.1"},"reference-count":35,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,8,17]],"date-time":"2021-08-17T00:00:00Z","timestamp":1629158400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,8,17]],"date-time":"2021-08-17T00:00:00Z","timestamp":1629158400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,8,17]],"date-time":"2021-08-17T00:00:00Z","timestamp":1629158400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100009091","name":"FCT","doi-asserted-by":"publisher","award":["SFRH\/BD\/129445\/2017"],"award-info":[{"award-number":["SFRH\/BD\/129445\/2017"]}],"id":[{"id":"10.13039\/501100009091","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001871","name":"Foundation for Science and Technology","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001871","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004895","name":"European Social Fund","doi-asserted-by":"publisher","award":["SFRH\/BD\/129445\/2017"],"award-info":[{"award-number":["SFRH\/BD\/129445\/2017"]}],"id":[{"id":"10.13039\/501100004895","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,8,17]]},"DOI":"10.1109\/cog52621.2021.9618985","type":"proceedings-article","created":{"date-parts":[[2021,12,7]],"date-time":"2021-12-07T20:53:06Z","timestamp":1638910386000},"page":"01-08","source":"Crossref","is-referenced-by-count":13,"title":["VGC AI Competition - A New Model of Meta-Game Balance AI Competition"],"prefix":"10.1109","author":[{"given":"Simao","family":"Reis","sequence":"first","affiliation":[]},{"given":"Luis Paulo","family":"Reis","sequence":"additional","affiliation":[]},{"given":"Nuno","family":"Lau","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref33","first-page":"1","article-title":"A self-play policy optimization approach to battling pok&#x00E9;mon","author":"huang","year":"0","journal-title":"2019 IEEE Conference on Games (COG)"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1613\/jair.2628"},{"key":"ref31","first-page":"209","article-title":"Convergence and no-regret in multiagent learning","author":"bowling","year":"2004","journal-title":"Proceedings of the 17th International Conference on Neural Information Processing Systems ser NIPS'04"},{"key":"ref30","article-title":"Playing atari with deep reinforcement learning","volume":"abs 1312 5602","author":"mnih","year":"2013","journal-title":"CoRR"},{"key":"ref35","author":"nachum","year":"2018","journal-title":"Data-efficient hierarchical reinforcement learning"},{"key":"ref34","article-title":"Pykalitics","year":"2017","journal-title":"Pykalitics"},{"key":"ref10","article-title":"Introducing the hearthstone-ai competition","author":"dockhorn","year":"2019","journal-title":"ArXiv Preprint"},{"key":"ref11","author":"jakub kowalski","year":"2019","journal-title":"Strategy card game ai competition cog 2019"},{"key":"ref12","first-page":"4335","article-title":"General video game ai: Competition, challenges, and opportunities","author":"perez-liebana","year":"0","journal-title":"Proceedings of the Thirtieth AAAI Conference on Artificial Intelligence ser AAAI'16"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TG.2019.2901021"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2013.6633610"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/2908812.2908920"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2017.8080431"},{"key":"ref17","first-page":"411","article-title":"Ludii - the ludemic general game system","volume":"325","author":"piette","year":"2020","journal-title":"ECAI 2020 24th European Conference on Artificial Intelligence ser Frontiers in Artificial Intelligence and Applications"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2019.8848084"},{"key":"ref19","author":"zarel","year":"2019","journal-title":"Pok&#x00E9;mon showdown"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-70836-2_11"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1126\/science.aam6960"},{"key":"ref3","article-title":"Mastering atari, go, chess and shogi by planning with a learned model","author":"schrittwieser","year":"2019","journal-title":"ArXiv Preprint"},{"key":"ref27","article-title":"Predicting pok&#x00E9;mon battle winner using machine learning","author":"charde","year":"0","journal-title":"Submitted to conference Web shorturl at\/kGRS3"},{"key":"ref6","first-page":"17","article-title":"Which games should we (ai) explore next?","volume":"130","author":"hoover","year":"2019","journal-title":"Artificial General Intelligence in Games Where Play Meets Design and User Experience"},{"key":"ref29","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","volume":"48","author":"mnih","year":"2016","journal-title":"Proceedings of The 33rd International Conference on Machine Learning ser Proceedings of Machine Learning Research"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1126\/science.aao1733"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TG.2018.2883499"},{"key":"ref7","author":"tpc","year":"2020","journal-title":"2020 pok&#x00E9;mon video game championships (vgc) format rules"},{"key":"ref2","doi-asserted-by":"crossref","first-page":"354","DOI":"10.1038\/nature24270","article-title":"Mastering the game of go without human knowledge","volume":"550","author":"silver","year":"2017","journal-title":"Nature"},{"key":"ref1","article-title":"Dota 2 with large scale deep reinforcement learning","volume":"abs 1912 6680","author":"berner","year":"2019","journal-title":"ArXiv"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/GCCE.2013.6664844"},{"key":"ref20","article-title":"Pok&#x00E9;mon online","year":"2019","journal-title":"coyotte508"},{"key":"ref22","article-title":"Openai gym","author":"brockman","year":"2016","journal-title":"ArXiv Preprint"},{"key":"ref21","first-page":"40","article-title":"Competitive deep reinforcement learning over a pok&#x00E9;mon battling simulator","author":"sim\u00f5es","year":"0","journal-title":"2020 IEEE International Conference on Autonomous Robot Systems and Competitions (ICARSC)"},{"key":"ref24","author":"alfonso","year":"2019","journal-title":"Pok&#x00E9;mon battle"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2017.8080435"},{"key":"ref26","volume":"135","author":"sutton","year":"1998","journal-title":"Introduction to Reinforcement Learning"},{"key":"ref25","author":"kalose","year":"2018","journal-title":"Optimal battle strategy in poke-mon using reinforcement learning"}],"event":{"name":"2021 IEEE Conference on Games (CoG)","location":"Copenhagen, Denmark","start":{"date-parts":[[2021,8,17]]},"end":{"date-parts":[[2021,8,20]]}},"container-title":["2021 IEEE Conference on Games (CoG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9618888\/9618891\/09618985.pdf?arnumber=9618985","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T16:53:32Z","timestamp":1652201612000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9618985\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,8,17]]},"references-count":35,"URL":"https:\/\/doi.org\/10.1109\/cog52621.2021.9618985","relation":{},"subject":[],"published":{"date-parts":[[2021,8,17]]}}}