{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T19:08:09Z","timestamp":1775070489413,"version":"3.50.1"},"reference-count":57,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,6,1]],"date-time":"2026-06-01T00:00:00Z","timestamp":1780272000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,6,1]],"date-time":"2026-06-01T00:00:00Z","timestamp":1780272000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,6,1]],"date-time":"2026-06-01T00:00:00Z","timestamp":1780272000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,6,1]],"date-time":"2026-06-01T00:00:00Z","timestamp":1780272000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,6,1]],"date-time":"2026-06-01T00:00:00Z","timestamp":1780272000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,6,1]],"date-time":"2026-06-01T00:00:00Z","timestamp":1780272000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,6,1]],"date-time":"2026-06-01T00:00:00Z","timestamp":1780272000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U22B2061"],"award-info":[{"award-number":["U22B2061"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2022YFB4300603"],"award-info":[{"award-number":["2022YFB4300603"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Neurocomputing"],"published-print":{"date-parts":[[2026,6]]},"DOI":"10.1016\/j.neucom.2026.133438","type":"journal-article","created":{"date-parts":[[2026,3,23]],"date-time":"2026-03-23T17:12:00Z","timestamp":1774285920000},"page":"133438","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["Double distillation network for multi-agent reinforcement learning"],"prefix":"10.1016","volume":"682","author":[{"ORCID":"https:\/\/orcid.org\/0009-0009-6141-2133","authenticated-orcid":false,"given":"Yang","family":"Zhou","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3180-0815","authenticated-orcid":false,"given":"Siying","family":"Wang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9933-8014","authenticated-orcid":false,"given":"Wenyu","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Ruoning","family":"Zhang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4882-4063","authenticated-orcid":false,"given":"Zhitong","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Zixuan","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Zijun","family":"Ma","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.neucom.2026.133438_bib0005","doi-asserted-by":"crossref","first-page":"92177","DOI":"10.1109\/ACCESS.2022.3202938","article-title":"Unmanned Aerial vehicle swarm cooperative decision-making for Sead mission: a hierarchical multiagent reinforcement learning approach","volume":"10","author":"Yue","year":"2022","journal-title":"IEEE Access"},{"key":"10.1016\/j.neucom.2026.133438_bib0010","doi-asserted-by":"crossref","first-page":"122","DOI":"10.1016\/j.jmsy.2023.11.010","article-title":"Multirobot collaborative task dynamic scheduling based on multiagent reinforcement learning with heuristic graph convolution considering robot service performance","volume":"72","author":"Zhou","year":"2024","journal-title":"J. Manuf. Syst."},{"key":"10.1016\/j.neucom.2026.133438_bib0015","doi-asserted-by":"crossref","first-page":"4710","DOI":"10.3390\/s23104710","article-title":"A comprehensive survey on multi-agent reinforcement learning for connected and automated vehicles","volume":"23","author":"Yadav","year":"2023","journal-title":"Sensors"},{"key":"10.1016\/j.neucom.2026.133438_bib0020","doi-asserted-by":"crossref","first-page":"82","DOI":"10.1016\/j.neucom.2016.01.031","article-title":"Multi-agent reinforcement learning as a rehearsal for decentralized planning","volume":"190","author":"Kraemer","year":"2016","journal-title":"Neurocomputing"},{"key":"10.1016\/j.neucom.2026.133438_bib0025","series-title":"Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems","doi-asserted-by":"crossref","first-page":"2085","DOI":"10.65109\/JSRC7365","article-title":"Value-decomposition networks for cooperative multi-agent learning based on team reward","author":"Sunehag","year":"2018"},{"key":"10.1016\/j.neucom.2026.133438_bib0030","first-page":"1","article-title":"Monotonic value function factorisation for deep multi-agent reinforcement learning","volume":"21","author":"Rashid","year":"2020","journal-title":"J. Mach. Learn. Res."},{"key":"10.1016\/j.neucom.2026.133438_bib0035","author":"Yang"},{"key":"10.1016\/j.neucom.2026.133438_bib0040","series-title":"International Conference on Machine Learning","first-page":"6860","article-title":"Coach-player multi-agent reinforcement learning for dynamic team composition","author":"Liu","year":"2021"},{"key":"10.1016\/j.neucom.2026.133438_bib0045","doi-asserted-by":"crossref","first-page":"140","DOI":"10.1109\/TG.2022.3232390","article-title":"Ctds: centralized teacher with decentralized student for multiagent reinforcement learning","volume":"16","author":"Zhao","year":"2022","journal-title":"IEEE Trans. Games"},{"key":"10.1016\/j.neucom.2026.133438_bib0050","doi-asserted-by":"crossref","first-page":"32438","DOI":"10.52202\/068431-2350","article-title":"Rethinking individual global max in cooperative multi-agent reinforcement learning","volume":"35","author":"Hong","year":"2022","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.neucom.2026.133438_bib0055","series-title":"2022 International Joint Conference on Neural Networks (IJCNN)","first-page":"1","article-title":"Commander-soldiers reinforcement learning for cooperative multi-agent systems","author":"Chen","year":"2022"},{"key":"10.1016\/j.neucom.2026.133438_bib0060","doi-asserted-by":"crossref","first-page":"5023","DOI":"10.1007\/s10462-022-10299-x","article-title":"Deep multiagent reinforcement learning: challenges and directions","volume":"56","author":"Wong","year":"2023","journal-title":"Artif. Intell. Rev."},{"key":"10.1016\/j.neucom.2026.133438_bib0065","doi-asserted-by":"crossref","first-page":"13677","DOI":"10.1007\/s10489-022-04105-y","article-title":"A review of cooperative multi-agent deep reinforcement learning","volume":"53","author":"Oroojlooy","year":"2023","journal-title":"Applied Intell."},{"key":"10.1016\/j.neucom.2026.133438_bib0070","doi-asserted-by":"crossref","DOI":"10.1016\/j.neucom.2025.130586","article-title":"Collision-free motion-constrained path planning for multiple unmanned delivery vehicles based on heuristic deep reinforcement learning","author":"Han","year":"2025","journal-title":"Neurocomputing"},{"key":"10.1016\/j.neucom.2026.133438_bib0075","article-title":"High-performance multi-agent path finding in high-obstacle-density and large-size maps","author":"Sun","year":"2025","journal-title":"Neurocomputing"},{"key":"10.1016\/j.neucom.2026.133438_bib0080","doi-asserted-by":"crossref","DOI":"10.1016\/j.engappai.2022.105019","article-title":"A deep reinforcement learning-based cooperative approach for multi-intersection traffic signal control","volume":"114","author":"Haddad","year":"2022","journal-title":"Eng. Appl. Artif. Intell."},{"key":"10.1016\/j.neucom.2026.133438_bib0085","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2023.110696","article-title":"Auto-learning communication reinforcement learning for multi-intersection traffic light control","volume":"275","author":"Zhu","year":"2023","journal-title":"Knowl.-Based Syst."},{"key":"10.1016\/j.neucom.2026.133438_bib0090","article-title":"Decentralized neighboring information fusion for traffic network signal control","author":"Liu","year":"2025","journal-title":"Neurocomputing"},{"key":"10.1016\/j.neucom.2026.133438_bib0095","doi-asserted-by":"crossref","DOI":"10.1016\/j.neucom.2025.131056","article-title":"UAV formation control based on ensemble reinforcement learning","author":"Wu","year":"2025","journal-title":"Neurocomputing"},{"key":"10.1016\/j.neucom.2026.133438_bib0100","article-title":"A sample selection mechanism for multi-Ucav AIR combat policy training using multi-agent reinforcement learning","author":"Zihui","year":"2025","journal-title":"Chin. J. Aeronaut."},{"key":"10.1016\/j.neucom.2026.133438_bib0105","series-title":"2020 International Joint Conference on Neural Networks (IJCNN)","first-page":"1","article-title":"Multi-agent connected autonomous driving using deep reinforcement learning","author":"Palanisamy","year":"2020"},{"key":"10.1016\/j.neucom.2026.133438_bib0110","doi-asserted-by":"crossref","first-page":"2478","DOI":"10.1109\/LRA.2025.3531146","article-title":"Intnet: a communication-driven multi-agent reinforcement learning framework for cooperative autonomous driving","author":"Parada","year":"2025","journal-title":"IEEE Robot. Autom. Lett."},{"key":"10.1016\/j.neucom.2026.133438_bib0115","series-title":"International Conference on Machine Learning","first-page":"5887","article-title":"Qtran: learning to factorize with transformation for cooperative multi-agent reinforcement learning","author":"Son","year":"2019"},{"key":"10.1016\/j.neucom.2026.133438_bib0120","first-page":"10199","article-title":"Weighted qmix: expanding monotonic value function factorisation for deep multi-agent reinforcement learning","volume":"33","author":"Rashid","year":"2020","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.neucom.2026.133438_bib0125","series-title":"International Conference on Learning Representations","first-page":"1","article-title":"Qplex: duplex dueling multi-agent q-learning","author":"Wang","year":"2021"},{"key":"10.1016\/j.neucom.2026.133438_bib0130","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2023.110709","article-title":"Regularization-adapted Anderson acceleration for multi-agent reinforcement learning","volume":"275","author":"Wang","year":"2023","journal-title":"Knowl.-Based Syst."},{"key":"10.1016\/j.neucom.2026.133438_bib0135","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2024.111719","article-title":"Qdap: downsizing adaptive policy for cooperative multi-agent reinforcement learning","volume":"294","author":"Zhao","year":"2024","journal-title":"Knowl.-Based Syst."},{"key":"10.1016\/j.neucom.2026.133438_bib0140","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2024.112665","article-title":"Enhancing collaboration in multi-agent reinforcement learning with correlated trajectories","volume":"305","author":"Wang","year":"2024","journal-title":"Knowl.-Based Syst."},{"key":"10.1016\/j.neucom.2026.133438_bib0145","doi-asserted-by":"crossref","first-page":"1","DOI":"10.3390\/math14010001","article-title":"Assisted-value factorization with latent interaction in cooperate multi-agent reinforcement learning","volume":"13","author":"Zhao","year":"2025","journal-title":"Mathematics"},{"key":"10.1016\/j.neucom.2026.133438_bib0150","first-page":"1","article-title":"Noise-regularized advantage value for multi-agent reinforcement learning","volume":"10","author":"Wang","year":"2022","journal-title":"Mathematics"},{"key":"10.1016\/j.neucom.2026.133438_bib0155","doi-asserted-by":"crossref","first-page":"1051","DOI":"10.1109\/TCDS.2023.3326297","article-title":"State augmentation via self-supervision in offline multiagent reinforcement learning","volume":"16","author":"Wang","year":"2023","journal-title":"IEEE Trans. Cogn. Dev. Syst."},{"key":"10.1016\/j.neucom.2026.133438_bib0160","doi-asserted-by":"crossref","DOI":"10.1016\/j.neunet.2024.106547","article-title":"Optimistic sequential multi-agent reinforcement learning with motivational communication","volume":"179","author":"Huang","year":"2024","journal-title":"Neural Netw."},{"key":"10.1016\/j.neucom.2026.133438_bib0165","author":"Tan"},{"key":"10.1016\/j.neucom.2026.133438_bib0170","series-title":"2023 International Annual Conference on Complex Systems and Intelligent Science (CSIS-IAC)","first-page":"259","article-title":"A learnable noise exploration method for multi-agent reinforcement learning","author":"Zhao","year":"2023"},{"key":"10.1016\/j.neucom.2026.133438_bib0175","first-page":"38","article-title":"Distilling the knowledge in a neural network","volume":"14","author":"Hinton","year":"2015","journal-title":"Comput. Sci."},{"key":"10.1016\/j.neucom.2026.133438_bib0180","doi-asserted-by":"crossref","first-page":"1789","DOI":"10.1007\/s11263-021-01453-z","article-title":"Knowledge distillation: a survey","volume":"129","author":"Gou","year":"2021","journal-title":"Int. J. Comput. Vis."},{"key":"10.1016\/j.neucom.2026.133438_bib0185","doi-asserted-by":"crossref","first-page":"3048","DOI":"10.1109\/TPAMI.2021.3055564","article-title":"Knowledge distillation and student-teacher learning for visual intelligence: a review and new outlooks","volume":"44","author":"Wang","year":"2021","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.neucom.2026.133438_bib0190","first-page":"1","article-title":"A survey on knowledge distillation of large language models","author":"Xu","year":"2024","journal-title":"IEEE Trans. Artif. Intell."},{"key":"10.1016\/j.neucom.2026.133438_bib0195","doi-asserted-by":"crossref","first-page":"3191","DOI":"10.1109\/JSAC.2023.3310046","article-title":"Digital Twin enhanced federated reinforcement learning with lightweight knowledge distillation in mobile networks","volume":"41","author":"Zhou","year":"2023","journal-title":"IEEE J. Sel. Areas Commun."},{"key":"10.1016\/j.neucom.2026.133438_bib0200","first-page":"7449","article-title":"Eliminating primacy bias in online reinforcement learning by self-distillation","author":"Li","year":"2024","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"key":"10.1016\/j.neucom.2026.133438_bib0205","series-title":"2024 IEEE International Conference on Robotics and Automation (ICRA)","first-page":"7447","article-title":"Guided online distillation: promoting safe reinforcement learning by offline demonstration","author":"Li","year":"2024"},{"key":"10.1016\/j.neucom.2026.133438_bib0210","first-page":"9148","article-title":"Multi-teacher knowledge distillation with reinforcement learning for visual recognition","volume":"39","author":"Yang","year":"2025","journal-title":"Proc. AAAI Conf. Artif. Intell."},{"key":"10.1016\/j.neucom.2026.133438_bib0215","doi-asserted-by":"crossref","first-page":"8515","DOI":"10.1016\/j.asr.2025.04.009","article-title":"Cmkd-Net: a cross-modal knowledge distillation method for remote sensing image classification","volume":"75","author":"Song","year":"2025","journal-title":"Adv. Space Res."},{"key":"10.1016\/j.neucom.2026.133438_bib0220","doi-asserted-by":"crossref","first-page":"1002","DOI":"10.3390\/sym17071002","article-title":"Symmetrical learning and transferring: efficient knowledge distillation for remote sensing image classification","volume":"17","author":"Song","year":"2025","journal-title":"Symmetry"},{"key":"10.1016\/j.neucom.2026.133438_bib0225","first-page":"1","article-title":"Policy distillation","author":"Rusu","year":"2015","journal-title":"Comput. Sci."},{"key":"10.1016\/j.neucom.2026.133438_bib0230","series-title":"Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence, IJCAI-24","first-page":"31","article-title":"Ptde: personalized training with distilled execution for multi-agent reinforcement learning","author":"Chen","year":"2024"},{"key":"10.1016\/j.neucom.2026.133438_bib0235","first-page":"1","article-title":"Attentional knowledge-based state-space model for electrocardiogram signal classification","volume":"74","author":"Xiao","year":"2025","journal-title":"IEEE Trans. Instrum. Meas."},{"key":"10.1016\/j.neucom.2026.133438_bib0240","doi-asserted-by":"crossref","first-page":"3413","DOI":"10.1109\/TBDATA.2025.3594294","article-title":"Knowledge aggregation transformer network for multivariate time series classification","volume":"11","author":"Xiao","year":"2025","journal-title":"IEEE Trans. Big Data"},{"key":"10.1016\/j.neucom.2026.133438_bib0245","doi-asserted-by":"crossref","DOI":"10.1016\/j.inffus.2025.103250","article-title":"Dpem: dual-perspective enhanced mamba for multivariate time series forecasting","volume":"123","author":"Hou","year":"2025","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.neucom.2026.133438_bib0250","doi-asserted-by":"crossref","first-page":"174","DOI":"10.1109\/TFUZZ.2025.3626982","article-title":"Cae-fcm: context-aware enhanced fuzzy cognitive maps for interpretable multivariate time series forecasting","volume":"34","author":"Hou","year":"2025","journal-title":"IEEE Trans. Fuzzy Syst."},{"key":"10.1016\/j.neucom.2026.133438_bib0255","first-page":"21688","article-title":"One node one model: featuring the missing-half for graph clustering","volume":"39","author":"Xie","year":"2025","journal-title":"Proc. AAAI Conf. Artif. Intell."},{"key":"10.1016\/j.neucom.2026.133438_bib0260","series-title":"A Concise Introduction to Decentralized POMDPs","volume":"vol. 1","author":"Oliehoek","year":"2016"},{"key":"10.1016\/j.neucom.2026.133438_bib0265","first-page":"24611","article-title":"The surprising effectiveness of PPO in cooperative multi-agent games","volume":"35","author":"Yu","year":"2022","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.neucom.2026.133438_bib0270","author":"Burda"},{"key":"10.1016\/j.neucom.2026.133438_bib0275","author":"Samvelyan"},{"key":"10.1016\/j.neucom.2026.133438_bib0280","series-title":"International Conference on Machine Learning","first-page":"28387","article-title":"Individual contributions as intrinsic exploration scaffolds for multi-agent reinforcement learning","author":"Li","year":"2024"},{"key":"10.1016\/j.neucom.2026.133438_bib0285","series-title":"Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence","first-page":"3926","article-title":"Group-aware coordination graph for multi-agent reinforcement learning","author":"Duan","year":"2024"}],"container-title":["Neurocomputing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0925231226008350?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0925231226008350?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T17:54:12Z","timestamp":1775066052000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0925231226008350"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,6]]},"references-count":57,"alternative-id":["S0925231226008350"],"URL":"https:\/\/doi.org\/10.1016\/j.neucom.2026.133438","relation":{},"ISSN":["0925-2312"],"issn-type":[{"value":"0925-2312","type":"print"}],"subject":[],"published":{"date-parts":[[2026,6]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Double distillation network for multi-agent reinforcement learning","name":"articletitle","label":"Article Title"},{"value":"Neurocomputing","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.neucom.2026.133438","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 Elsevier B.V. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"133438"}}