{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,19]],"date-time":"2026-02-19T05:55:36Z","timestamp":1771480536074,"version":"3.50.1"},"reference-count":71,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"10","license":[{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100018537","name":"National Science and Technology Major Project","doi-asserted-by":"publisher","award":["2022ZD0116405"],"award-info":[{"award-number":["2022ZD0116405"]}],"id":[{"id":"10.13039\/501100018537","id-type":"DOI","asserted-by":"publisher"}]},{"name":"State Key Laboratory of Complex and Critical Software Environment"},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central universities","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2025,10]]},"DOI":"10.1109\/tnnls.2025.3577259","type":"journal-article","created":{"date-parts":[[2025,7,9]],"date-time":"2025-07-09T23:19:43Z","timestamp":1752103183000},"page":"18118-18132","source":"Crossref","is-referenced-by-count":3,"title":["Robust Multi-Agent Reinforcement Learning by Mutual Information Regularization"],"prefix":"10.1109","volume":"36","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-7947-6743","authenticated-orcid":false,"given":"Simin","family":"Li","sequence":"first","affiliation":[{"name":"State Key Laboratory of Complex and Critical Software Environment, Beihang University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-8698-8744","authenticated-orcid":false,"given":"Ruixiao","family":"Xu","sequence":"additional","affiliation":[{"name":"State Key Laboratory of Software Development Environment, Beihang University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0003-7613-7628","authenticated-orcid":false,"given":"Jingqiao","family":"Xiu","sequence":"additional","affiliation":[{"name":"School of Computing, National University of Singapore, Singapore"}]},{"given":"Yuwei","family":"Zheng","sequence":"additional","affiliation":[{"name":"State Key Laboratory of Software Development Environment, Beihang University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6219-1741","authenticated-orcid":false,"given":"Pu","family":"Feng","sequence":"additional","affiliation":[{"name":"State Key Laboratory of Software Development Environment, Beihang University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1936-9396","authenticated-orcid":false,"given":"Yuqing","family":"Ma","sequence":"additional","affiliation":[{"name":"State Key Laboratory of Complex and Critical Software Environment, Beihang University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7064-7438","authenticated-orcid":false,"given":"Bo","family":"An","sequence":"additional","affiliation":[{"name":"College of Computing and Data Science, Nanyang Technological University, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8132-5613","authenticated-orcid":false,"given":"Yaodong","family":"Yang","sequence":"additional","affiliation":[{"name":"Institute of Artificial Intelligence, Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8425-4195","authenticated-orcid":false,"given":"Xianglong","family":"Liu","sequence":"additional","affiliation":[{"name":"State Key Laboratory of Software Development Environment, Beihang University, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","first-page":"6382","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Lowe"},{"key":"ref2","first-page":"4295","article-title":"QMIX: Monotonic value function factorisation for deep multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Rashid"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2103.01955"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3329530"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2023.3236361"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2023.3264275"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2023.3292036"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2024.3387397"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-019-1724-z"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33014213"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA46639.2022.9812321"},{"key":"ref12","article-title":"Adversarial policies: Attacking deep reinforcement learning","author":"Gleave","year":"2019","journal-title":"arXiv:1905.10615"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1080\/23742917.2020.1846307"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/s10458-023-09599-5"},{"key":"ref15","article-title":"Attacking cooperative multi-agent reinforcement learning by adversarial minority influence","author":"Li","year":"2023","journal-title":"arXiv:2302.03322"},{"issue":"54","key":"ref16","first-page":"1","article-title":"Deep reinforcement learning for swarm systems","volume":"20","author":"H\u00fcttenrauch","year":"2019","journal-title":"J. Mach. Learn. Res."},{"key":"ref17","first-page":"10571","article-title":"Robust multi-agent reinforcement learning with model uncertainty","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Zhang"},{"key":"ref18","article-title":"Robust multi-agent Q-learning in cooperative games with adversaries","volume-title":"Proc. AAAI Workshop Reinforcement Learn. Games","author":"Nisioti"},{"key":"ref19","article-title":"Byzantine robust cooperative multi-agent reinforcement learning as a Bayesian game","author":"Li","year":"2023","journal-title":"arXiv:2305.12872"},{"key":"ref20","first-page":"6215","article-title":"Action robust reinforcement learning and applications in continuous control","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Tessler"},{"key":"ref21","first-page":"68121","article-title":"Robust multi-agent reinforcement learning via adversarial regularization: Theoretical foundation and stable algorithms","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Bukharin"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i10.26388"},{"key":"ref23","first-page":"22274","article-title":"Twice regularized MDPs and the equivalence between robustness and regularization","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Derman"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1518\/001872095779049543"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1201\/b10401-15"},{"key":"ref26","article-title":"Reinforcement learning and control as probabilistic inference: Tutorial and review","author":"Levine","year":"2018","journal-title":"arXiv:1805.00909"},{"key":"ref27","article-title":"Offline reinforcement learning: Tutorial, review, and perspectives on open problems","author":"Levine","year":"2020","journal-title":"arXiv:2005.01643"},{"key":"ref28","article-title":"The information bottleneck method","author":"Tishby","year":"2000","journal-title":"arXiv:physics\/0004057"},{"key":"ref29","article-title":"Soft Q-learning with mutual-information regularization","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Grau-Moya"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-28929-8"},{"key":"ref31","first-page":"20038","article-title":"Two heads are better than one: A simple exploration framework for efficient multi-agent reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Li"},{"key":"ref32","article-title":"DCIR: Dynamic consistency intrinsic reward for multi-agent reinforcement learning","author":"Lin","year":"2023","journal-title":"arXiv:2312.05783"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/tpami.2025.3540467"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3326867"},{"key":"ref35","article-title":"What is the solution for state-adversarial multi-agent reinforcement learning?","author":"Han","year":"2022","journal-title":"arXiv:2212.02705"},{"key":"ref36","article-title":"Robust multi-agent reinforcement learning with state uncertainty","author":"He","year":"2023","journal-title":"Trans. Mach. Learn. Res."},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1287\/opre.1110.0931"},{"key":"ref38","article-title":"A robust and constrained multi-agent reinforcement learning electric vehicle rebalancing method in AMoD systems","author":"He","year":"2022","journal-title":"arXiv:2209.08230"},{"key":"ref39","first-page":"1055","article-title":"Learning and testing resilience in cooperative multi-agent systems","volume-title":"Proc. 19th Int. Conf. Auto. Agents MultiAgent Syst.","author":"Phan"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2021.3133537"},{"key":"ref41","article-title":"Maximum entropy RL (provably) solves some robust RL problems","author":"Eysenbach","year":"2021","journal-title":"arXiv:2103.06257"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1103\/PhysRevA.33.1134"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/18.761290"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2002.1114861"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1103\/PhysRevE.69.066138"},{"key":"ref46","first-page":"531","article-title":"Mutual information neural estimation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Belghazi"},{"key":"ref47","article-title":"Deep variational information bottleneck","author":"Alemi","year":"2016","journal-title":"arXiv:1612.00410"},{"key":"ref48","first-page":"5171","article-title":"On variational bounds of mutual information","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","author":"Poole"},{"key":"ref49","first-page":"1779","article-title":"CLUB: A contrastive log-ratio upper bound of mutual information","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Cheng"},{"key":"ref50","first-page":"3040","article-title":"Social influence as intrinsic motivation for multi-agent deep reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Jaques"},{"key":"ref51","article-title":"Influence-based multi-agent exploration","author":"Wang","year":"2019","journal-title":"arXiv:1910.05512"},{"key":"ref52","first-page":"7613","article-title":"MAVEN: Multi-agent variational exploration","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Mahajan"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-94662-3_12"},{"key":"ref54","article-title":"A maximum mutual information framework for multi-agent reinforcement learning","author":"Kim","year":"2020","journal-title":"arXiv:2006.02732"},{"key":"ref55","article-title":"PMIC: Improving multi-agent reinforcement learning with progressive mutual information collaboration","author":"Li","year":"2022","journal-title":"arXiv:2203.08553"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1016\/0022-247X(65)90154-X"},{"key":"ref57","article-title":"Probabilistic recursive reasoning for multi-agent reinforcement learning","author":"Wen","year":"2019","journal-title":"arXiv:1901.09207"},{"key":"ref58","first-page":"2052","article-title":"Off-policy deep reinforcement learning without exploration","volume-title":"Proc. 36th Int. Conf. Mach. Learn.","volume":"97","author":"Fujimoto"},{"key":"ref59","first-page":"1042","article-title":"Information-theoretic considerations in batch reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Chen"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1214\/22-AOS2231"},{"key":"ref61","first-page":"1352","article-title":"Reinforcement learning with deep energy-based policies","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja"},{"key":"ref62","article-title":"Convergence of Q-learning: A simple proof","author":"Melo","year":"2001"},{"key":"ref63","article-title":"Towards deep learning models resistant to adversarial attacks","author":"Madry","year":"2017","journal-title":"arXiv:1706.06083"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/ITW.2015.7133169"},{"key":"ref65","article-title":"Opening the black box of deep neural networks via information","author":"Shwartz-Ziv","year":"2017","journal-title":"arXiv:1703.00810"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1088\/1742-5468\/ab3985"},{"key":"ref67","first-page":"188","article-title":"Accelerating reinforcement learning with learned skill priors","volume-title":"Proc. Conf. robot Learn.","author":"Pertsch"},{"key":"ref68","article-title":"The StarCraft multi-agent challenge","author":"Samvelyan","year":"2019","journal-title":"arXiv:1902.04043"},{"key":"ref69","first-page":"576","article-title":"Decentralized control of quadrotor swarms with end-to-end deep reinforcement learning","volume-title":"Proc. Conf. Robot Learn.","author":"Batra"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1109\/TASE.2021.3064065"},{"issue":"1","key":"ref71","first-page":"59","article-title":"The e-puck, a robot designed for education in engineering","volume-title":"Proc. 9th Conf. Auto. Robot Syst. Competitions","volume":"1","author":"Mondada"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/5962385\/11195929\/11074764.pdf?arnumber=11074764","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,8]],"date-time":"2025-10-08T17:39:12Z","timestamp":1759945152000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11074764\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10]]},"references-count":71,"journal-issue":{"issue":"10"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2025.3577259","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,10]]}}}