{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,10]],"date-time":"2026-02-10T18:40:48Z","timestamp":1770748848638,"version":"3.50.0"},"reference-count":69,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Ministry of Education (MOE) Academic Research Fund (AcRF) Tier 1","award":["RG13\/23"],"award-info":[{"award-number":["RG13\/23"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2025,3]]},"DOI":"10.1109\/tnnls.2024.3382985","type":"journal-article","created":{"date-parts":[[2024,4,10]],"date-time":"2024-04-10T14:19:18Z","timestamp":1712758758000},"page":"5304-5318","source":"Crossref","is-referenced-by-count":1,"title":["Decision-Making With Speculative Opponent Models"],"prefix":"10.1109","volume":"36","author":[{"ORCID":"https:\/\/orcid.org\/0009-0005-7650-1754","authenticated-orcid":false,"given":"Jing","family":"Sun","sequence":"first","affiliation":[{"name":"School of Computer Science and Engineering, Nanyang Technological University, Jurong West, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0257-1240","authenticated-orcid":false,"given":"Shuo","family":"Chen","sequence":"additional","affiliation":[{"name":"Beijing Institute for General Artificial Intelligence, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8434-1181","authenticated-orcid":false,"given":"Cong","family":"Zhang","sequence":"additional","affiliation":[{"name":"School of Computer Science and Engineering, Nanyang Technological University, Jurong West, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6639-8547","authenticated-orcid":false,"given":"Yining","family":"Ma","sequence":"additional","affiliation":[{"name":"School of Computer Science and Engineering, Nanyang Technological University, Jurong West, Singapore"}]},{"given":"Jie","family":"Zhang","sequence":"additional","affiliation":[{"name":"School of Computer Science and Engineering, Nanyang Technological University, Jurong West, Singapore"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/s10458-019-09421-1"},
{"key":"ref2","first-page":"321","article-title":"Multi-agent reinforcement learning: A selective overview of theories and algorithms","volume-title":"Studies in Systems, Decision and Control","volume":"325","author":"Zhang","year":"2021"},
{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2021.3078462"},
{"key":"ref4","article-title":"Benchmarking multi-agent deep reinforcement learning algorithms in cooperative tasks","author":"Papoudakis","year":"2020","journal-title":"arXiv:2006.07869"},
{"key":"ref5","first-page":"6382","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Lowe"},
{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11794"},
{"key":"ref7","first-page":"4295","article-title":"QMix: Monotonic value function factorisation for deep multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Rashid"},
{"key":"ref8","article-title":"Value-decomposition networks for cooperative multi-agent learning","author":"Sunehag","year":"2017","journal-title":"arXiv:1706.05296"},
{"key":"ref9","first-page":"24611","article-title":"The surprising effectiveness of PPO in cooperative multi-agent games","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Yu"},
{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/j.artint.2018.01.002"},
{"key":"ref11","first-page":"1804","article-title":"Opponent modeling in deep reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"He"},
{"key":"ref12","first-page":"122","article-title":"Learning with opponent-learning awareness","volume-title":"Proc. 17th Int. Conf. Auto. Agents MultiAgent Syst.","author":"Foerster"},
{"key":"ref13","first-page":"4257","article-title":"Modeling others using oneself in multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Raileanu"},
{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/85"},
{"key":"ref15","article-title":"Towards efficient detection and optimal response against sophisticated opponents","author":"Yang","year":"2018","journal-title":"arXiv:1809.04240"},
{"key":"ref16","first-page":"19210","article-title":"Agent modelling under partial observability for deep reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Papoudakis"},
{"key":"ref17","article-title":"Agent modelling under partial observability for deep reinforcement learning","author":"Papoudakis","year":"2020","journal-title":"arXiv:2006.09447"},
{"key":"ref18","first-page":"449","article-title":"A distributional perspective on reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Bellemare"},
{"key":"ref19","first-page":"1096","article-title":"Implicit quantile networks for distributional reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Dabney"},
{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11791"},
{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.7551\/mitpress\/14207.001.0001"},
{"key":"ref22","first-page":"798","article-title":"Likelihood quantile networks for coordinating multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Auto. Agents Multiagent Syst. (AAMAS)","author":"Lyu"},
{"key":"ref23","article-title":"QR-MIX: Distributional value function factorisation for cooperative multi-agent reinforcement learning","author":"Hu","year":"2020","journal-title":"arXiv:2009.04197"},
{"key":"ref24","first-page":"9945","article-title":"DFAC framework: Factorizing the value function via quantile mixture for multi-agent distributional Q-learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Sun"},
{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2022.3202097"},
{"key":"ref26","article-title":"Pommerman: A multi-agent playground","author":"Resnick","year":"2018","journal-title":"arXiv:1809.07124"},
{"key":"ref27","article-title":"The StarCraft multi-agent challenge","author":"Samvelyan","year":"2019","journal-title":"arXiv:1902.04043"},
{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1007\/s10462-021-09996-w"},
{"key":"ref29","first-page":"1","article-title":"The dynamics of reinforcement learning in cooperative multiagent systems","volume-title":"Proc. AAAI\/IAAI","author":"Claus"},
{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-335-6.50027-1"},
{"key":"ref31","first-page":"5887","article-title":"QTran: Learning to factorize with transformation for cooperative multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Son"},
{"key":"ref32","article-title":"QPLEX: Duplex dueling multi-agent Q-learning","author":"Wang","year":"2020","journal-title":"arXiv:2008.01062"},
{"issue":"1","key":"ref33","first-page":"374","article-title":"Iterative solution of games by fictitious play","volume":"13","author":"Brown","year":"1951","journal-title":"Act. Anal. Prod. Allocation"},
{"key":"ref34","first-page":"1388","article-title":"A deep policy inference q-network for multi-agent systems","volume-title":"Proc. 17th Int. Conf. Auto. Agents MultiAgent Syst.","author":"Hong"},
{"key":"ref35","first-page":"547","article-title":"Reasoning about hypothetical agent behaviours and their parameters","volume-title":"Proc. 16th Conf. Auto. Agents MultiAgent Syst.","author":"Albrecht"},
{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/88"},
{"key":"ref37","first-page":"1","article-title":"Probabilistic recursive reasoning for multi-agent reinforcement learning","volume-title":"Proc. 7th Int. Conf. Learn. Represent.","author":"Wen"},
{"key":"ref38","article-title":"Modelling bounded rationality in multi-agent interactions by generalized recursive reasoning","author":"Wen","year":"2019","journal-title":"arXiv:1901.09216"},
{"key":"ref39","first-page":"962","article-title":"A deep Bayesian policy reuse approach against non-stationary agents","volume-title":"Proc. 32nd Int. Conf. Neural Inf. Process. Syst.","author":"Zheng"},
{"key":"ref40","first-page":"738","article-title":"Thompson sampling for Markov games with piecewise stationary opponent policies","volume-title":"Uncertainty in Artificial Intelligence","author":"DiGiovanni","year":"2021"},
{"key":"ref41","first-page":"6829","article-title":"Greedy when sure and conservative when uncertain about the opponents","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Fu"},
{"key":"ref42","article-title":"Learning with opponent-learning awareness","author":"Foerster","year":"2017","journal-title":"arXiv:1709.04326"},
{"key":"ref43","first-page":"1802","article-title":"Learning policy representations in multiagent systems","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Grover"},
{"key":"ref44","first-page":"5541","article-title":"A policy gradient algorithm for learning to learn in multiagent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Kim"},
{"key":"ref45","first-page":"1","article-title":"Multi-agent interactions modeling with correlated policies","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Liu"},
{"key":"ref46","first-page":"1712","article-title":"Deep interactive Bayesian reinforcement learning via meta-learning","volume-title":"Proc. 20th Int. Conf. Auto. Agents MultiAgent Syst.","author":"Zintgraf"},
{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN55064.2022.9892077"},
{"key":"ref48","first-page":"4218","article-title":"Machine theory of mind","volume-title":"Proc. 35th Int. Conf. Mach. Learn.","author":"Rabinowitz"},
{"key":"ref49","first-page":"1","article-title":"Distributed distributional deterministic policy gradients","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Barth-Maron"},
{"key":"ref50","first-page":"1352","article-title":"Distributional policy optimization: An alternative approach for continuous control","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Tessler"},
{"key":"ref51","article-title":"Sample-based distributional policy gradient","author":"Singh","year":"2020","journal-title":"arXiv:2001.02652"},
{"key":"ref52","first-page":"7135","article-title":"Implicit distributional reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Yue"},
{"key":"ref53","article-title":"PACER: A fully push-forward-based distributional reinforcement learning algorithm","author":"Bai","year":"2023","journal-title":"arXiv:2306.06637"},
{"key":"ref54","article-title":"Distributional GFlowNets with quantile flows","author":"Zhang","year":"2023","journal-title":"arXiv:2302.05793"},
{"key":"ref55","first-page":"23832","article-title":"Distributional Hamilton\u2013Jacobi\u2013Bellman equations for continuous-time reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Wiltzer"},
{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1257\/jep.15.4.143"},
{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-019-1924-6"},
{"key":"ref58","article-title":"DSAC: Distributional soft actor critic for risk-sensitive reinforcement learning","author":"Ma","year":"2020","journal-title":"arXiv:2004.14547"},
{"key":"ref59","first-page":"1008","article-title":"Actor-critic algorithms","volume-title":"Proc. Conf. Neural Inf. Process. Syst.","author":"Konda"},
{"key":"ref60","first-page":"387","article-title":"Deterministic policy gradient algorithms","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Silver"},
{"key":"ref61","volume-title":"Reinforcement Learning: An Introduction","author":"Sutton","year":"2018"},
{"key":"ref62","first-page":"1057","article-title":"Policy gradient methods for reinforcement learning with function approximation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Sutton"},
{"key":"ref63","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Mnih"},
{"key":"ref64","first-page":"2961","article-title":"Actor-attention-critic for multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Iqbal"},
{"key":"ref65","first-page":"1","article-title":"ADAM: A method for stochastic optimization","volume-title":"Proc. ICLR","author":"Kingma"},
{"key":"ref66","article-title":"Automatic differentiation in PyTorch","volume-title":"Proc. NIPS Autodiff Workshop, Future Gradient-Based Mach. Learn. Softw. Techn.","author":"Paszke"},
{"key":"ref67","first-page":"1","article-title":"RODE: Learning roles to decompose multi-agent tasks","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Wang"},
{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1214\/aoms\/1177729694"},
{"key":"ref69","first-page":"8792","article-title":"Generalized cross entropy loss for training deep neural networks with noisy labels","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"31","author":"Zhang"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5962385\/10908444\/10496229.pdf?arnumber=10496229","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,5]],"date-time":"2025-12-05T18:39:16Z","timestamp":1764959956000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10496229\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,3]]},"references-count":69,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2024.3382985","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,3]]}}}