{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T22:44:23Z","timestamp":1776206663863,"version":"3.50.1"},"reference-count":55,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"6","license":[{"start":{"date-parts":[[2024,6,1]],"date-time":"2024-06-01T00:00:00Z","timestamp":1717200000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,6,1]],"date-time":"2024-06-01T00:00:00Z","timestamp":1717200000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,6,1]],"date-time":"2024-06-01T00:00:00Z","timestamp":1717200000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62006111"],"award-info":[{"award-number":["62006111"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62073160"],"award-info":[{"award-number":["62073160"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100000923","name":"Australian Research Council\u2019s Future Fellowship Funding Scheme","doi-asserted-by":"publisher","award":["FT220100656"],"award-info":[{"award-number":["FT220100656"]}],"id":[{"id":"10.13039\/501100000923","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004608","name":"Natural Science Foundation of Jiangsu Province of China","doi-asserted-by":"publisher","award":["BK20200330"],"award-info":[{"award-number":["BK20200330"]}],"id":[{"id":"10.13039\/501100004608","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Alexander von Humboldt Foundation, Germany"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2024,6]]},"DOI":"10.1109\/tnnls.2022.3230701","type":"journal-article","created":{"date-parts":[[2022,12,29]],"date-time":"2022-12-29T18:35:13Z","timestamp":1672338913000},"page":"8557-8569","source":"Crossref","is-referenced-by-count":9,"title":["Depthwise Convolution for Multi-Agent Communication With Enhanced Mean-Field Approximation"],"prefix":"10.1109","volume":"35","author":[{"given":"Donghan","family":"Xie","sequence":"first","affiliation":[{"name":"Department of Control Science and Intelligence Engineering, School of Management and Engineering, Nanjing University, Nanjing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0304-3965","authenticated-orcid":false,"given":"Zhi","family":"Wang","sequence":"additional","affiliation":[{"name":"Department of Control Science and Intelligence Engineering, School of Management and Engineering, Nanjing University, Nanjing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3929-4707","authenticated-orcid":false,"given":"Chunlin","family":"Chen","sequence":"additional","affiliation":[{"name":"Department of Control Science and Intelligence Engineering, School of Management and Engineering, Nanjing University, Nanjing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7425-3559","authenticated-orcid":false,"given":"Daoyi","family":"Dong","sequence":"additional","affiliation":[{"name":"School of Engineering and Information Technology, University of New South Wales, Canberra, ACT, Australia"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/tnn.1998.712192"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992698"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1038\/s41562-019-0804-2"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/s10458-020-09480-9"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2019.2927869"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TMECH.2019.2899365"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref8","article-title":"Continuous control with deep reinforcement learning","author":"Lillicrap","year":"2015","journal-title":"arXiv:1509.02971"},{"key":"ref9","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv:1707.06347"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3055499"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2018.2806087"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TSMC.2019.2898389"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/tcyb.2022.3170485"},{"key":"ref14","first-page":"2961","article-title":"Actor-attention-critic for multi-agent reinforcement learning","volume-title":"Proc. 36th Int. Conf. Mach. Learn.","volume":"97","author":"Iqbal"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i05.6211"},{"key":"ref16","first-page":"1","article-title":"Graph convolutional reinforcement learning","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Jiang"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-019-1724-z"},{"key":"ref18","article-title":"Learning individually inferred communication for multi-agent cooperation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Ding"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2020\/345"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-307-3.50049-6"},{"key":"ref21","first-page":"6379","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Lowe"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11794"},{"key":"ref23","first-page":"2085","article-title":"Value-decomposition networks for cooperative multi-agent learning based on team reward","volume-title":"Proc. Int. Conf. Auto. Agents Multiagent Syst.","author":"Sunehag"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/635"},{"key":"ref25","first-page":"4295","article-title":"QMIX: Monotonic value function factorisation for deep multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Rashid"},{"key":"ref26","first-page":"10199","article-title":"Weighted QMIX: Expanding monotonic value function factorisation for deep multi-agent reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Rashid"},{"key":"ref27","first-page":"3930","article-title":"UneVEn: Universal value exploration for multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"139","author":"Gupta"},{"key":"ref28","first-page":"12491","article-title":"FOP: Factorizing optimal joint policy of maximum-entropy multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"139","author":"Zhang"},{"key":"ref29","first-page":"2244","article-title":"Learning multi-agent communication with backpropagation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Sukhbaatar"},{"key":"ref30","first-page":"1538","article-title":"TarMAC: Targeted multi-agent communication","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Das"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2021.110143"},{"key":"ref32","first-page":"2137","article-title":"Learning to communicate with deep multi-agent reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Foerster"},{"key":"ref33","article-title":"Multiagent bidirectionally-coordinated nets: Emergence of human-level coordination in learning to play StarCraft combat games","author":"Peng","year":"2017","journal-title":"arXiv:1703.10069"},{"key":"ref34","first-page":"9908","article-title":"Learning efficient multi-agent communication: An information bottleneck approach","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Wang"},{"key":"ref35","first-page":"7254","article-title":"Learning attentional communication for multi-agent cooperation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Jiang"},{"key":"ref36","first-page":"1","article-title":"Learning when to communicate at scale in multiagent cooperative and competitive tasks","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Singh"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1038\/nature14539"},{"key":"ref38","article-title":"MobileNets: Efficient convolutional neural networks for mobile vision applications","author":"Howard","year":"2017","journal-title":"arXiv:1704.04861"},{"key":"ref39","first-page":"5571","article-title":"Mean field multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Yang"},{"key":"ref40","first-page":"1","article-title":"SUMO\u2014Simulation of urban mobility: An overview","volume-title":"Proc. 3rd Int. Conf. Adv. Syst. Simul.","author":"Behrisch"},{"key":"ref41","first-page":"2186","article-title":"The StarCraft multi-agent challenge","volume-title":"Proc. Int. Conf. Auto. Agents Multiagent Syst.","author":"Samvelyan"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2021.3108514"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3070824"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2020.109245"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2020.3028171"},{"key":"ref46","first-page":"10088","article-title":"Learning multi-agent communication through structured attentive reasoning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Rangwala"},{"key":"ref47","first-page":"764","article-title":"Deep implicit coordination graphs for multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Auto. Agents Multiagent Syst.","author":"Li"},{"key":"ref48","first-page":"1","article-title":"Learning nearly decomposable value functions via communication minimization","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Wang"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/TNSE.2022.3178757"},{"key":"ref50","first-page":"871","article-title":"Extending Q-learning to general adaptive multi-agent systems","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Tesauro"},{"key":"ref51","first-page":"1146","article-title":"Stabilising experience replay for deep multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Foerster"},{"key":"ref52","volume-title":"Phase Transitions and Critical Phenomena","author":"Domb","year":"2000"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-335-6.50027-1"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2019.2901791"},{"key":"ref55","first-page":"1","article-title":"Multi-agent reinforcement learning for networked system control","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Chu"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5962385\/10547160\/10003136.pdf?arnumber=10003136","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,11]],"date-time":"2024-12-11T01:48:02Z","timestamp":1733881682000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10003136\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,6]]},"references-count":55,"journal-issue":{"issue":"6"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2022.3230701","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,6]]}}}
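
The record above is a standard Crossref "work" message: the payload sits under "message", with the article metadata in fields such as "title", "author", "container-title", "volume", "journal-issue", and "page". Below is a minimal sketch of retrieving and reading such a record, assuming network access and the public Crossref REST API endpoint https://api.crossref.org/works/{DOI}; the DOI and field names are taken from the record above, everything else is illustrative.

# Sketch: fetch this Crossref work record and print citation fields.
# Uses only the Python standard library.
import json
import urllib.request

DOI = "10.1109/tnnls.2022.3230701"  # DOI from the record above
url = "https://api.crossref.org/works/" + DOI

with urllib.request.urlopen(url) as resp:
    work = json.load(resp)["message"]  # metadata lives under "message"

print(work["title"][0])            # article title
print(work["container-title"][0])  # full journal name
# Authors are a list of {"given": ..., "family": ...} objects.
print(", ".join(a["given"] + " " + a["family"] for a in work["author"]))
print("vol. " + work["volume"] + ", no. " + work["journal-issue"]["issue"]
      + ", pp. " + work["page"])

Running this should reproduce the bibliographic line for the article, e.g. "Depthwise Convolution for Multi-Agent Communication With Enhanced Mean-Field Approximation", IEEE Transactions on Neural Networks and Learning Systems, vol. 35, no. 6, pp. 8557-8569.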