{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T13:40:35Z","timestamp":1730209235745,"version":"3.28.0"},"reference-count":17,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,8,21]],"date-time":"2022-08-21T00:00:00Z","timestamp":1661040000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,8,21]],"date-time":"2022-08-21T00:00:00Z","timestamp":1661040000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,8,21]]},"DOI":"10.1109\/cog51982.2022.9893589","type":"proceedings-article","created":{"date-parts":[[2022,9,20]],"date-time":"2022-09-20T19:33:31Z","timestamp":1663702411000},"page":"576-579","source":"Crossref","is-referenced-by-count":1,"title":["Towards Modern Card Games with Large-Scale Action Spaces Through Action Representation"],"prefix":"10.1109","author":[{"given":"Zhiyuan","family":"Yao","sequence":"first","affiliation":[{"name":"School of Business, Stevens Institute of Technology,Hoboken,NJ,USA"}]},{"given":"Tianyu","family":"Shi","sequence":"additional","affiliation":[{"name":"University of Toronto,Intelligent Transportation Systems Centre,Ontario,Canada"}]},{"given":"Site","family":"Li","sequence":"additional","affiliation":[{"name":"Deterrence, rct AI,Burbank,CA,USA"}]},{"given":"Yiting","family":"Xie","sequence":"additional","affiliation":[{"name":"Deterrence, rct AI,Burbank,CA,USA"}]},{"given":"Yuanyuan","family":"Qin","sequence":"additional","affiliation":[{"name":"Deterrence, rct AI,Burbank,CA,USA"}]},{"given":"Xiongjie","family":"Xie","sequence":"additional","affiliation":[{"name":"Deterrence, rct AI,Burbank,CA,USA"}]},{"given":"Huan","family":"Lu","sequence":"additional","affiliation":[{"name":"Deterrence, rct AI,Burbank,CA,USA"}]},{"given":"Yan","family":"Zhang","sequence":"additional","affiliation":[{"name":"Deterrence, rct AI,Burbank,CA,USA"}]}],"member":"263","reference":[{"key":"ref10","article-title":"Perfectdou: Dominating doudizhu with perfect information distillation","author":"guan","year":"2022","journal-title":"arXiv preprint arXiv 2203 16406"},{"key":"ref11","article-title":"Deep reinforcement learning in large discrete action spaces","author":"dulac-arnold","year":"2015","journal-title":"arXiv preprint arXiv 1512 07108"},{"key":"ref12","first-page":"941","article-title":"Learning action representations for reinforcement learning","author":"chandak","year":"2019","journal-title":"International Conference on Machine Learning"},{"doi-asserted-by":"publisher","key":"ref13","DOI":"10.1109\/CoG47356.2020.9231687"},{"key":"ref14","article-title":"Efficient estimation of word representations in vector space","author":"mikolov","year":"2013","journal-title":"arXiv preprint arXiv 1301 3781"},{"year":"2018","author":"sutton","journal-title":"Reinforcement Learning An Introduction","key":"ref15"},{"doi-asserted-by":"publisher","key":"ref16","DOI":"10.1007\/978-3-319-11740-9_34"},{"doi-asserted-by":"publisher","key":"ref17","DOI":"10.1126\/science.aar6404"},{"key":"ref4","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017","journal-title":"arXiv preprint arXiv 1707 06347"},{"key":"ref3","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"2016","journal-title":"International Conference on Machine Learning"},{"doi-asserted-by":"publisher","key":"ref6","DOI":"10.1126\/science.1259433"},{"key":"ref5","first-page":"12333","article-title":"Douzero: Mastering doudizhu with self-play deep reinforcement learning","author":"zha","year":"2021","journal-title":"International Conference on Machine Learning"},{"doi-asserted-by":"publisher","key":"ref8","DOI":"10.1126\/science.aay2400"},{"key":"ref7","doi-asserted-by":"crossref","first-page":"418","DOI":"10.1126\/science.aao1733","article-title":"Superhuman ai for heads-up no-limit poker: Libratus beats top professionals","volume":"359","author":"brown","year":"2018","journal-title":"Science"},{"key":"ref2","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015","journal-title":"arXiv preprint arXiv 1509 02971"},{"key":"ref1","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref9","article-title":"Suphx: Mastering mahjong with deep reinforcement learning","author":"li","year":"2020","journal-title":"arXiv preprint arXiv 2003 13874"}],"event":{"name":"2022 IEEE Conference on Games (CoG)","start":{"date-parts":[[2022,8,21]]},"location":"Beijing, China","end":{"date-parts":[[2022,8,24]]}},"container-title":["2022 IEEE Conference on Games (CoG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9893561\/9893544\/09893589.pdf?arnumber=9893589","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,10,14]],"date-time":"2022-10-14T20:53:17Z","timestamp":1665780797000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9893589\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,8,21]]},"references-count":17,"URL":"https:\/\/doi.org\/10.1109\/cog51982.2022.9893589","relation":{},"subject":[],"published":{"date-parts":[[2022,8,21]]}}}