{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,21]],"date-time":"2026-04-21T15:06:45Z","timestamp":1776784005392,"version":"3.51.2"},"reference-count":35,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2022,6,1]],"date-time":"2022-06-01T00:00:00Z","timestamp":1654041600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,6,1]],"date-time":"2022-06-01T00:00:00Z","timestamp":1654041600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,6,1]],"date-time":"2022-06-01T00:00:00Z","timestamp":1654041600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Games"],"published-print":{"date-parts":[[2022,6]]},"DOI":"10.1109\/tg.2021.3049539","type":"journal-article","created":{"date-parts":[[2021,1,7]],"date-time":"2021-01-07T13:11:39Z","timestamp":1610025099000},"page":"212-220","source":"Crossref","is-referenced-by-count":53,"title":["Creating Pro-Level AI for a Real-Time Fighting Game Using Deep Reinforcement Learning"],"prefix":"10.1109","volume":"14","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1726-5569","authenticated-orcid":false,"given":"Inseok","family":"Oh","sequence":"first","affiliation":[{"name":"Game AI Lab, AI Center, NCSOFT, Gyeonggi-do, South Korea"}]},{"given":"Seungeun","family":"Rho","sequence":"additional","affiliation":[{"name":"Game AI Lab, AI Center, NCSOFT, Gyeonggi-do, South Korea"}]},{"given":"Sangbin","family":"Moon","sequence":"additional","affiliation":[{"name":"Game AI Lab, AI Center, NCSOFT, Gyeonggi-do, South Korea"}]},{"given":"Seongho","family":"Son","sequence":"additional","affiliation":[{"name":"Game AI Lab, AI Center, NCSOFT, Gyeonggi-do, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8017-1245","authenticated-orcid":false,"given":"Hyoil","family":"Lee","sequence":"additional","affiliation":[{"name":"Game AI Lab, AI Center, NCSOFT, Gyeonggi-do, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4571-0965","authenticated-orcid":false,"given":"Jinyun","family":"Chung","sequence":"additional","affiliation":[{"name":"Game AI Lab, AI Center, NCSOFT, Gyeonggi-do, South Korea"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1126\/science.aay2400"},{"key":"ref2","first-page":"1407","article-title":"IMPALA: Scalable distributed Deep-RL with importance weighted actor-learner architectures","volume-title":"Proc. 35th Int. Conf. Mach. Learn","volume":"80","author":"Espeholt","year":"2018"},{"key":"ref3","article-title":"Beating the worlds best at Super Smash Bros. with deep reinforcement learning","author":"Firoiu","year":"2017"},{"key":"ref4","article-title":"Deep reinforcement learning from self-play in imperfect-information games","author":"Heinrich","year":"2016"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11796"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"},{"key":"ref7","article-title":"Distributed prioritized experience replay","author":"Horgan","year":"2018"},{"key":"ref8","volume-title":"Dynamic Programming and Markov Processes","author":"Howard","year":"1960"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2018.8490376"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1126\/science.aau6249"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2017.8080432"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP.2018.8451491"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/GCCE.2013.6664844"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/439"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref16","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","volume-title":"Proc. 33rd Int. Conf. Int. Conf. Mach. Learn.","volume":"48","author":"Mnih"},{"key":"ref17","first-page":"278","article-title":"Policy invariance under reward transformations: Theory and application to reward shaping","volume-title":"Proc. 16th Int. Conf. Mach. Learn.","volume":"99","author":"Ng","year":"1999"},{"key":"ref18","volume-title":"","year":"2018"},{"key":"ref19","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1038\/nature16961"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1038\/nature24270"},{"key":"ref22","article-title":"Mastering Chess and Shogi by self-play with a general reinforcement learning algorithm","author":"Silver","year":"2017"},{"key":"ref23","volume-title":"Reinforcement Learning: An Introduction","author":"Sutton","year":"2018"},{"key":"ref24","article-title":"Starcraft II: A new challenge for reinforcement learning","author":"Vinyals","year":"2017"},{"key":"ref25","article-title":"Alphastar: Mastering the real-time strategy game Starcraft II. DeepMind blog","author":"Vinyals","year":"2019"},{"key":"ref26","article-title":"Sample efficient actor-critic with experience replay","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Wang"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/GCCE.2016.7800536"},{"key":"ref28","article-title":"Suphx: Mastering Mahjong with deep reinforcement learning","author":"Li","year":"2020"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-019-1724-z"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1017\/cbo9780511791307.017"},{"key":"ref31","volume-title":""},{"key":"ref32","volume-title":""},{"key":"ref33","volume-title":"Games, Strategies and Decision Making","author":"Harrington","year":"2009"},{"key":"ref34","first-page":"805","article-title":"Fictitious self-play in extensive-form games","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Heinrich"},{"key":"ref35","article-title":"Intrinsic motivation and automatic curricula via asymmetric self-play","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Sukhbaatar"}],"container-title":["IEEE Transactions on Games"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7782673\/9797039\/09314886.pdf?arnumber=9314886","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,9]],"date-time":"2024-01-09T23:57:38Z","timestamp":1704844658000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9314886\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,6]]},"references-count":35,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/tg.2021.3049539","relation":{},"ISSN":["2475-1502","2475-1510"],"issn-type":[{"value":"2475-1502","type":"print"},{"value":"2475-1510","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,6]]}}}