{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,18]],"date-time":"2026-01-18T21:28:00Z","timestamp":1768771680204,"version":"3.49.0"},"reference-count":42,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2022,3,1]],"date-time":"2022-03-01T00:00:00Z","timestamp":1646092800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,3,1]],"date-time":"2022-03-01T00:00:00Z","timestamp":1646092800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,3,1]],"date-time":"2022-03-01T00:00:00Z","timestamp":1646092800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2018AAA0102404"],"award-info":[{"award-number":["2018AAA0102404"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2018AAA0101005"],"award-info":[{"award-number":["2018AAA0101005"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2022,3]]},"DOI":"10.1109\/tnnls.2020.3041469","type":"journal-article","created":{"date-parts":[[2020,12,11]],"date-time":"2020-12-11T21:00:51Z","timestamp":1607720451000},"page":"1228-1241","source":"Crossref","is-referenced-by-count":80,"title":["Online Minimax Q Network Learning for Two-Player Zero-Sum Markov Games"],"prefix":"10.1109","volume":"33","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-5384-423X","authenticated-orcid":false,"given":"Yuanheng","family":"Zhu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8218-9633","authenticated-orcid":false,"given":"Dongbin","family":"Zhao","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref1","volume-title":"Dynamic Programming and Markov Processes","author":"Howard","year":"1960"},{"key":"ref2","volume-title":"Reinforcement Learning: An Introduction","author":"Sutton","year":"2018"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.39.10.1953"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/b978-1-55860-335-6.50027-1"},{"key":"ref5","first-page":"6379","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Lowe"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TETCI.2018.2823329"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2009.2036333"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1038\/nature24270"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TCIAIG.2012.2186810"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1038\/nature16961"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1126\/science.aar6404"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.2307\/j.ctvjsf522"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/9.256331"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2016.2561300"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2016.2643687"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2016.2638863"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1137\/s0363012996299557"},{"key":"ref18","first-page":"283","article-title":"Value function approximation in zero-sum Markov games","volume-title":"Proc. 18th Conf. Uncertainty Artif. Intell. (UAI)","author":"Lagoudakis"},{"key":"ref19","first-page":"1321","article-title":"Approximate dynamic programming for two-player zero-sum Markov games","volume-title":"Proc. 32nd Int. Conf. Mach. Learn.","volume":"37","author":"Perolat"},{"issue":"12","key":"ref20","first-page":"1529","article-title":"Recent progress of deep reinforcement learning: From AlphaGo to AlphaGo zero","volume":"34","author":"Tang","year":"2017","journal-title":"Control Theory Appl."},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/s12652-019-01503-y"},{"key":"ref23","article-title":"Deep reinforcement learning from self-play in imperfect-information games","author":"Heinrich","year":"2016","journal-title":"arXiv:1603.01121"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1145\/203330.203343"},{"key":"ref25","first-page":"1021","article-title":"Rational and convergent learning in stochastic games","volume-title":"Proc. 17th Int. Joint Conf. Artif. Intell. (IJCAI)","volume":"2","author":"Bowling"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-019-1724-z"},{"key":"ref27","volume-title":"The Theory of Learning in Games","author":"Fudenberg","year":"1998"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.36.1.48"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2014.2371046"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TSMC.2019.2911900"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2015.2503980"},{"key":"ref32","first-page":"1889","article-title":"Approximate modified policy iteration","volume-title":"Proc. 29th Int. Conf. Int. Conf. Mach. Learn.","author":"Scherrer"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/s10462-017-9548-4"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10295"},{"key":"ref35","first-page":"1995","article-title":"Dueling network architectures for deep reinforcement learning","volume-title":"Proc. 33rd Int. Conf. Mach. Learn.","volume":"48","author":"Wang"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.21236\/ADA276517"},{"key":"ref37","first-page":"2613","article-title":"Double Q-learning","volume-title":"Advances in Neural Information Processing Systems","author":"Hasselt","year":"2010"},{"key":"ref38","volume-title":"Differential Games: A Mathematical Theory With Applications to Warfare and Pursuit, Control and Optimization","author":"Isaacs","year":"1965"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ACC.2010.5530771"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/GCCE.2013.6664844"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/GCCE.2016.7800536"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1145\/3001773.3001797"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5962385\/9722951\/09292435.pdf?arnumber=9292435","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,9]],"date-time":"2024-01-09T23:20:44Z","timestamp":1704842444000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9292435\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,3]]},"references-count":42,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2020.3041469","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,3]]}}}