{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,9]],"date-time":"2025-09-09T21:55:58Z","timestamp":1757454958124,"version":"3.28.0"},"reference-count":18,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,8,21]],"date-time":"2022-08-21T00:00:00Z","timestamp":1661040000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,8,21]],"date-time":"2022-08-21T00:00:00Z","timestamp":1661040000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,8,21]]},"DOI":"10.1109\/cog51982.2022.9893588","type":"proceedings-article","created":{"date-parts":[[2022,9,20]],"date-time":"2022-09-20T19:33:31Z","timestamp":1663702411000},"page":"608-611","source":"Crossref","is-referenced-by-count":8,"title":["Supervised and Reinforcement Learning from Observations in Reconnaissance Blind Chess"],"prefix":"10.1109","author":[{"given":"Timo","family":"Bertram","sequence":"first","affiliation":[{"name":"JKU Linz,Austria"}]},{"given":"Johannes","family":"Furnkranz","sequence":"additional","affiliation":[{"name":"JKU Linz,Austria"}]},{"given":"Martin","family":"Muller","sequence":"additional","affiliation":[{"name":"University of Alberta,Canada"}]}],"member":"263","reference":[{"key":"ref10","article-title":"Deep synoptic Monte-Carlo planning in reconnaissance blind chess","volume":"34","author":"clark","year":"2021","journal-title":"Advances in neural information processing systems"},{"year":"2020","author":"savelyev","journal-title":"Mastering reconnaissance blind chess with reinforcement learning","key":"ref11"},{"doi-asserted-by":"publisher","key":"ref12","DOI":"10.1609\/aaai.v30i1.10013"},{"key":"ref13","article-title":"Deep reinforcement learning from self-play in imperfect-information games","author":"heinrich","year":"2016","journal-title":"CoRR"},{"key":"ref14","first-page":"10410","article-title":"DREAM: Deep regret minimization with advantage baselines and model-free learning","volume":"abs 2006","author":"steinberger","year":"2020","journal-title":"CoRR"},{"key":"ref15","first-page":"7621","article-title":"Single deep counterfactual regret minimization","volume":"abs 1901","author":"steinberger","year":"2019","journal-title":"CoRR"},{"key":"ref16","first-page":"3178","article-title":"Player of games","volume":"abs 2112","author":"schmid","year":"2021","journal-title":"CoRR"},{"key":"ref17","article-title":"Mastering chess and shogi by self-play with a general reinforcement learning algorithm","author":"silver","year":"2017","journal-title":"CoRR"},{"key":"ref18","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017","journal-title":"CoRR"},{"doi-asserted-by":"publisher","key":"ref4","DOI":"10.1126\/science.aay2400"},{"key":"ref3","article-title":"Dota 2 with large scale deep reinforcement learning","author":"berner","year":"2019","journal-title":"CoRR"},{"key":"ref6","doi-asserted-by":"crossref","first-page":"508","DOI":"10.1126\/science.aam6960","article-title":"Deepstack: Expert-level artificial intelligence in heads-up no-limit poker","volume":"356","author":"morav\u010d\u00edk","year":"2017","journal-title":"Science"},{"key":"ref5","first-page":"17057","article-title":"Combining deep reinforcement learning and search for imperfect-information games","volume":"33","author":"brown","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref8","first-page":"156","article-title":"Dealing with uncertainty: A piecewise grid agent for reconnaissance blind chess","volume":"35","author":"highley","year":"2020","journal-title":"Journal of Computing Sciences in Colleges"},{"key":"ref7","doi-asserted-by":"crossref","first-page":"350","DOI":"10.1038\/s41586-019-1724-z","article-title":"Grandmaster level in StarCraft II using multi-agent reinforcement learning","volume":"575","author":"vinyals","year":"2019","journal-title":"Nature"},{"doi-asserted-by":"publisher","key":"ref2","DOI":"10.1016\/j.artint.2019.103216"},{"doi-asserted-by":"publisher","key":"ref1","DOI":"10.1038\/nature16961"},{"year":"2022","author":"romstad","journal-title":"Stockfish","key":"ref9"}],"event":{"name":"2022 IEEE Conference on Games (CoG)","start":{"date-parts":[[2022,8,21]]},"location":"Beijing, China","end":{"date-parts":[[2022,8,24]]}},"container-title":["2022 IEEE Conference on Games (CoG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9893561\/9893544\/09893588.pdf?arnumber=9893588","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,10,10]],"date-time":"2022-10-10T20:25:37Z","timestamp":1665433537000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9893588\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,8,21]]},"references-count":18,"URL":"https:\/\/doi.org\/10.1109\/cog51982.2022.9893588","relation":{},"subject":[],"published":{"date-parts":[[2022,8,21]]}}}