{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T17:19:30Z","timestamp":1774718370499,"version":"3.50.1"},"reference-count":15,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,8,21]],"date-time":"2024-08-21T00:00:00Z","timestamp":1724198400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,8,21]],"date-time":"2024-08-21T00:00:00Z","timestamp":1724198400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62071343"],"award-info":[{"award-number":["62071343"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,8,21]]},"DOI":"10.1109\/apwcs61586.2024.10679289","type":"proceedings-article","created":{"date-parts":[[2024,9,19]],"date-time":"2024-09-19T17:22:57Z","timestamp":1726766577000},"page":"1-6","source":"Crossref","is-referenced-by-count":2,"title":["Diffusion-Based Multi-Agent Reinforcement Learning with Communication"],"prefix":"10.1109","author":[{"given":"Xinyue","family":"Qi","sequence":"first","affiliation":[{"name":"College of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics,China"}]},{"given":"Jianhang","family":"Tang","sequence":"additional","affiliation":[{"name":"Guizhou University,State Key Laboratory of Public Big Data,China"}]},{"given":"Jiangming","family":"Jin","sequence":"additional","affiliation":[{"name":"Qingcheng AI"}]},{"given":"Yang","family":"Zhang","sequence":"additional","affiliation":[{"name":"College of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics,China"}]}],"member":"263","reference":[{"issue":"35","key":"ref1","first-page":"24611","article-title":"The Surprising Effectiveness of PPO in Cooperative Multi-agent Games","volume-title":"Proc. NeurIPS 2022","author":"Yu","year":"2022"},{"key":"ref2","first-page":"844","article-title":"Contrasting Centralized and Decentralized Critics in Multi-agent Reinforcement Learning","volume-title":"Proc. AAMAS 2021","author":"Lyu","year":"2021"},{"key":"ref3","first-page":"1928","article-title":"Asynchronous Methods for Deep Reinforcement Learning","volume-title":"Proc. ICML 2016","author":"Mnih","year":"2016"},{"key":"ref4","article-title":"Decomposed Soft Actor-Critic Method for Cooperative Multi-agent Reinforcement Learning","author":"Pu","year":"2021","journal-title":"arXiv preprint"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1016\/j.ejor.2020.09.018"},{"issue":"178","key":"ref6","first-page":"1","article-title":"Monotonic Value Function Factorisation for Deep Multi-agent Reinforcement Learning","volume":"21","author":"Rashid","year":"2020","journal-title":"Journal of Machine Learning Research"},{"key":"ref7","article-title":"Multi-Agent Actor-Critic for Mixed Cooperative-Competitive Environments","volume-title":"Proc. NeurIPS 2017","author":"Lowe"},{"key":"ref8","article-title":"Addressing Function Approximation Error in Actor-Critic Methods","volume-title":"Proc. ICML 2018","author":"Fujimoto","year":"2018"},{"key":"ref9","first-page":"20132","article-title":"A Minimalist Approach to Offline Reinforcement Learning","volume-title":"Pro. NeurIPS 2021","volume":"34","author":"Fujimoto","year":"2021"},{"key":"ref10","article-title":"Diffusion Policies as an Expressive Policy Class for Offline Reinforcement Learning","volume-title":"Proc. ICLR 2023","author":"Wang","year":"2023"},{"key":"ref11","article-title":"Planning with Diffusion for Flexible Behavior Synthesis","author":"Janner","year":"2022","journal-title":"arXiv preprint"},{"key":"ref12","article-title":"Diffusion-Reinforcement Learning Hierarchical Motion Planning in Adversarial Multi-agent Games","author":"Wu","year":"2024","journal-title":"arXiv preprint"},{"key":"ref13","article-title":"Madiff: Offline Multi-agent Learning with Diffusion Models","author":"Zhu","year":"2023","journal-title":"arXiv preprint"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00930"},{"key":"ref15","first-page":"2052","article-title":"Off-policy deep reinforcement learning without exploration","volume-title":"International conference on machine learning","author":"Fujimoto"}],"event":{"name":"2024 VTS Asia Pacific Wireless Communications Symposium (APWCS)","location":"Singapore","start":{"date-parts":[[2024,8,21]]},"end":{"date-parts":[[2024,8,23]]}},"container-title":["2024 IEEE VTS Asia Pacific Wireless Communications Symposium (APWCS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10679110\/10679274\/10679289.pdf?arnumber=10679289","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,22]],"date-time":"2024-09-22T04:03:58Z","timestamp":1726977838000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10679289\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,8,21]]},"references-count":15,"URL":"https:\/\/doi.org\/10.1109\/apwcs61586.2024.10679289","relation":{},"subject":[],"published":{"date-parts":[[2024,8,21]]}}}