{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,20]],"date-time":"2026-03-20T15:46:39Z","timestamp":1774021599130,"version":"3.50.1"},"reference-count":50,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"12","license":[{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"National Key Research and Development Program of China","award":["2023YFE0209100"],"award-info":[{"award-number":["2023YFE0209100"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U21A20519"],"award-info":[{"award-number":["U21A20519"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U23A20310"],"award-info":[{"award-number":["U23A20310"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE J. Select. Areas Commun."],"published-print":{"date-parts":[[2024,12]]},"DOI":"10.1109\/jsac.2024.3459039","type":"journal-article","created":{"date-parts":[[2024,9,12]],"date-time":"2024-09-12T17:55:17Z","timestamp":1726163717000},"page":"3566-3580","source":"Crossref","is-referenced-by-count":14,"title":["Energy-Efficient Ground-Air-Space Vehicular Crowdsensing by Hierarchical Multi-Agent Deep Reinforcement Learning With Diffusion Models"],"prefix":"10.1109","volume":"42","author":[{"given":"Yinuo","family":"Zhao","sequence":"first","affiliation":[{"name":"School of Computer Science and Technology, Beijing Institute of Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0252-329X","authenticated-orcid":false,"given":"Chi Harold","family":"Liu","sequence":"additional","affiliation":[{"name":"School of Computer Science and Technology, Beijing Institute of Technology, Beijing, China"}]},{"given":"Tianjiao","family":"Yi","sequence":"additional","affiliation":[{"name":"School of Computer Science and Technology, Beijing Institute of Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6663-6712","authenticated-orcid":false,"given":"Guozheng","family":"Li","sequence":"additional","affiliation":[{"name":"School of Computer Science and Technology, Beijing Institute of Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1755-0183","authenticated-orcid":false,"given":"Dapeng","family":"Wu","sequence":"additional","affiliation":[{"name":"Department of Computer Science, City University of Hong Kong, Kowloon Tong, SAR, Hong Kong"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/OJCOMS.2020.3010270"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/MWC.008.00353"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2019.8917306"},{"key":"ref5","first-page":"27730","article-title":"Training language models to follow instructions with human feedback","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Ouyang"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2023.3268846"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2023.3320796"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TVT.2022.3175592"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICDE55515.2023.00140"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/MVT.2021.3085511"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TVT.2021.3090760"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/MVT.2019.2921244"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/VTC2023-Fall60731.2023.10333787"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TVT.2022.3182507"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2024.3378177"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2021.3121760"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TVT.2022.3217079"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TVT.2023.3256067"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2023.3298789"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM41043.2020.9155393"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2019.2904353"},{"key":"ref22","first-page":"1","article-title":"Denoising diffusion implicit models","volume-title":"Proc. ICLR","author":"Song"},{"key":"ref23","first-page":"6840","article-title":"Denoising diffusion probabilistic models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Ho"},{"key":"ref24","first-page":"1","article-title":"Analytic-DPM: An analytic estimate of the optimal reverse variance in diffusion probabilistic models","volume-title":"Proc. ICLR","author":"Bao"},{"key":"ref25","first-page":"1","article-title":"Diffusion policies as an expressive policy class for offline reinforcement learning","volume-title":"Proc. ICLR","author":"Wang"},{"key":"ref26","first-page":"1","article-title":"Efficient diffusion policies for offline reinforcement learning","volume-title":"Proc. ICLR","volume":"36","author":"Kang"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TMC.2024.3356178"},{"key":"ref28","first-page":"1889","article-title":"Trust region policy optimization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Schulman"},{"key":"ref29","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv:1707.06347"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ELTICOM57747.2022.10037980"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2021.3102185"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/LWC.2021.3068793"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TCOMM.2013.020413.110848"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.2011.2178230"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2023.3345395"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1016\/j.trc.2020.102861"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ICC.1999.765552"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2019.2902559"},{"key":"ref39","article-title":"A quantitative measure of fairness and discrimination","author":"Jain","year":"1984"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1016\/j.orl.2016.03.007"},{"key":"ref41","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja"},{"key":"ref42","first-page":"17981","article-title":"Structured denoising diffusion models in discrete state-spaces","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Austin"},{"key":"ref43","article-title":"Is independent learning all you need in the StarCraft multi-agent challenge?","author":"Schroeder de Witt","year":"2020","journal-title":"arXiv:2011.09533"},{"key":"ref44","article-title":"High-dimensional continuous control using generalized advantage estimation","author":"Schulman","year":"2015","journal-title":"arXiv:1506.02438"},{"key":"ref45","volume-title":"Propagation Data and Prediction Methods for the Design of Terrestrial Broadband Millimetric Radio Access Systems","author":"Series","year":"2003"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2019.2947782"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/GLOCOM.2018.8647360"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2022.3188563"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/ICPEA51500.2021.9417847"},{"key":"ref50","first-page":"12491","article-title":"FOP: Factorizing optimal joint policy of maximum-entropy multi-agent reinforcement learning","volume-title":"Proc. ICML","author":"Zhang"}],"container-title":["IEEE Journal on Selected Areas in Communications"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/49\/10767099\/10679184.pdf?arnumber=10679184","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,11]],"date-time":"2024-12-11T01:37:18Z","timestamp":1733881038000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10679184\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12]]},"references-count":50,"journal-issue":{"issue":"12"},"URL":"https:\/\/doi.org\/10.1109\/jsac.2024.3459039","relation":{},"ISSN":["0733-8716","1558-0008"],"issn-type":[{"value":"0733-8716","type":"print"},{"value":"1558-0008","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,12]]}}}