{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T13:40:55Z","timestamp":1730209255652,"version":"3.28.0"},"reference-count":24,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,8,21]],"date-time":"2022-08-21T00:00:00Z","timestamp":1661040000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,8,21]],"date-time":"2022-08-21T00:00:00Z","timestamp":1661040000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,8,21]]},"DOI":"10.1109\/cog51982.2022.9893712","type":"proceedings-article","created":{"date-parts":[[2022,9,20]],"date-time":"2022-09-20T15:33:31Z","timestamp":1663688011000},"page":"504-507","source":"Crossref","is-referenced-by-count":0,"title":["Mjx: A framework for Mahjong AI research"],"prefix":"10.1109","author":[{"given":"Sotetsu","family":"Koyamada","sequence":"first","affiliation":[{"name":"Kyoto University,Graduate School of Informatics,Kyoto,Japan"}]},{"given":"Keigo","family":"Habara","sequence":"additional","affiliation":[{"name":"Kyoto University,Graduate School of Informatics,Kyoto,Japan"}]},{"given":"Nao","family":"Goto","sequence":"additional","affiliation":[{"name":"Kyoto University,Graduate School of Informatics,Kyoto,Japan"}]},{"given":"Shinri","family":"Okano","sequence":"additional","affiliation":[{"name":"Kyoto University,Faculty of Science,Kyoto,Japan"}]},{"given":"Soichiro","family":"Nishimori","sequence":"additional","affiliation":[{"name":"Kyoto University,Faculty of Integrated Human Studies,Kyoto,Japan"}]},{"given":"Shin","family":"Ishii","sequence":"additional","affiliation":[{"name":"Kyoto University,Graduate School of 
Informatics,Kyoto,Japan"}]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1613\/jair.3912"},{"journal-title":"Mjai Game server for Japanese mahjong AI","year":"2022","author":"ichikawa","key":"ref11"},{"journal-title":"Rules for Japanese Mahjong","year":"2022","key":"ref12"},{"key":"ref13","article-title":"RLCard: A toolkit for reinforcement learning in card games","author":"zha","year":"2019","journal-title":"arXiv 1910 04376"},{"article-title":"Tenhou","year":"2022","author":"tsunoda","key":"ref14"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2015.7317929"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TG.2020.3036471"},{"key":"ref17","article-title":"Suphx: Mastering mahjong with deep reinforcement learning","author":"li","year":"2020","journal-title":"arXiv 2003 13590"},{"key":"ref18","article-title":"OpenAI Gym","author":"brockman","year":"2016","journal-title":"arXiv 1606 01540 [cs]"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1038\/nature24270"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1038\/nature16961"},{"key":"ref6","doi-asserted-by":"crossref","first-page":"418","DOI":"10.1126\/science.aao1733","article-title":"Superhuman AI for heads-up no-limit poker: Libratus beats top professionals","volume":"359","author":"brown","year":"2018","journal-title":"Science"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1126\/science.aar6404"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1126\/science.aay2400"},{"key":"ref7","doi-asserted-by":"crossref","first-page":"508","DOI":"10.1126\/science.aam6960","article-title":"DeepStack: Expert-level artificial intelligence in heads-up no-limit 
poker","volume":"356","author":"morav\u010d\u00edk","year":"2017","journal-title":"Science"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/S0004-3702(01)00129-1"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/203330.203343"},{"key":"ref9","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref20","first-page":"15032","article-title":"PettingZoo: Gym for multi-agent reinforcement learning","volume":"34","author":"terry","year":"2021","journal-title":"Advances in neural information processing systems"},{"key":"ref22","article-title":"MinAtar: An Atari-inspired testbed for thorough and reproducible reinforcement learning experiments","author":"young","year":"2019","journal-title":"arXiv 1903 03176"},{"key":"ref21","article-title":"OpenSpiel: A framework for reinforcement learning in games","author":"lanctot","year":"2019","journal-title":"arXiv 1908 09453"},{"key":"ref24","article-title":"OpenHoldem: An open toolkit for large-scale imperfect-information game research","author":"li","year":"2020","journal-title":"arXiv 2012 06168"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2019.8848075"}],"event":{"name":"2022 IEEE Conference on Games (CoG)","start":{"date-parts":[[2022,8,21]]},"location":"Beijing, China","end":{"date-parts":[[2022,8,24]]}},"container-title":["2022 IEEE Conference on Games 
(CoG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9893561\/9893544\/09893712.pdf?arnumber=9893712","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,10,10]],"date-time":"2022-10-10T16:25:43Z","timestamp":1665419143000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9893712\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,8,21]]},"references-count":24,"URL":"https:\/\/doi.org\/10.1109\/cog51982.2022.9893712","relation":{},"subject":[],"published":{"date-parts":[[2022,8,21]]}}}