{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,4,16]],"date-time":"2025-04-16T05:59:32Z","timestamp":1744783172740,"version":"3.28.0"},"reference-count":23,"publisher":"IEEE","license":[{"start":{"date-parts":[[2019,8,1]],"date-time":"2019-08-01T00:00:00Z","timestamp":1564617600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,8,1]],"date-time":"2019-08-01T00:00:00Z","timestamp":1564617600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,8,1]],"date-time":"2019-08-01T00:00:00Z","timestamp":1564617600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019,8]]},"DOI":"10.1109\/cig.2019.8848003","type":"proceedings-article","created":{"date-parts":[[2019,9,27]],"date-time":"2019-09-27T01:49:14Z","timestamp":1569548954000},"page":"1-4","source":"Crossref","is-referenced-by-count":21,"title":["Deep Reinforcement Learning in Match-3 Game"],"prefix":"10.1109","author":[{"given":"Ildar","family":"Kamaldinov","sequence":"first","affiliation":[]},{"given":"Ilya","family":"Makarov","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","article-title":"Predicting Game Level Difficulty Using Deep Neural Networks","author":"purmonen","year":"2017","journal-title":"Tech Rep"},{"key":"ref11","article-title":"Simulating Human Game Play for Level Difficulty Estimation with Convolutional Neural Networks","author":"eisen","year":"0","journal-title":"Tech Rep"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2018.8490442"},{"key":"ref13","first-page":"282","article-title":"Bandit based monte-carlo planning","author":"kocsis","year":"2006","journal-title":"ECML-06 Number 4212 in LNCS"},{"article-title":"Mastering Chess and Shogi by Self-Play with a General Reinforcement Learning Algorithm","year":"2017","author":"silver","key":"ref14"},{"article-title":"Xception: Deep Learning with Depthwise Separable Convolutions","year":"2016","author":"chollet","key":"ref15"},{"key":"ref16","article-title":"Playing atari with deep reinforcement learning","author":"mnih","year":"2013","journal-title":"CoRR"},{"article-title":"Proximal Policy Optimization Algorithms","year":"2017","author":"schulman","key":"ref17"},{"article-title":"Asynchronous Methods for Deep Reinforcement Learning","year":"2016","author":"mnih","key":"ref18"},{"article-title":"RLlib: Abstractions for Distributed Reinforcement Learning","year":"2017","author":"liang","key":"ref19"},{"key":"ref4","article-title":"DeepMind Control Suite","author":"tassa","year":"2018","journal-title":"Tech Rep"},{"key":"ref3","volume":"135","author":"sutton","year":"1998","journal-title":"Introduction to Reinforcement Learning"},{"key":"ref6","doi-asserted-by":"crossref","DOI":"10.1016\/j.artint.2019.103216","article-title":"The Hanabi Challenge: A New Frontier for AI Research","author":"bard","year":"2019"},{"article-title":"AI Safety Gridworlds","year":"2017","author":"leike","key":"ref5"},{"article-title":"Neural MMO: A Massively Multiagent Game Environment for Training and Evaluating Intelligent Agents","year":"2019","author":"suarez","key":"ref8"},{"article-title":"Emergent Coordination Through Competition","year":"2019","author":"liu","key":"ref7"},{"article-title":"OpenAI Gym","year":"2016","author":"brockman","key":"ref2"},{"key":"ref1","article-title":"Crushing Candy Crush","author":"poromaa","year":"0","journal-title":"Tech Rep"},{"article-title":"Quantifying Generalization in Reinforcement Learning","year":"2018","author":"cobbe","key":"ref9"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TCIAIG.2012.2186810"},{"key":"ref22","first-page":"1","article-title":"Learning to play pong video game via deep reinforcement learning","author":"makarov","year":"2017","journal-title":"CEUR WP"},{"key":"ref21","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref23","first-page":"1","article-title":"Deep reinforcement learning in vizdoom first-person shooter for health gathering scenario","author":"akimov","year":"2019","journal-title":"MMEDIA2010"}],"event":{"name":"2019 IEEE Conference on Games (CoG)","start":{"date-parts":[[2019,8,20]]},"location":"London, United Kingdom","end":{"date-parts":[[2019,8,23]]}},"container-title":["2019 IEEE Conference on Games (CoG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8844551\/8847948\/08848003.pdf?arnumber=8848003","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T14:47:06Z","timestamp":1658155626000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8848003\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,8]]},"references-count":23,"URL":"https:\/\/doi.org\/10.1109\/cig.2019.8848003","relation":{},"subject":[],"published":{"date-parts":[[2019,8]]}}}