{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T15:19:27Z","timestamp":1775229567896,"version":"3.50.1"},"reference-count":15,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2023,6,1]],"date-time":"2023-06-01T00:00:00Z","timestamp":1685577600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2023,6,1]],"date-time":"2023-06-01T00:00:00Z","timestamp":1685577600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,6,1]],"date-time":"2023-06-01T00:00:00Z","timestamp":1685577600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key R&D Program of China","doi-asserted-by":"publisher","award":["2019YFB1803400"],"award-info":[{"award-number":["2019YFB1803400"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Wireless Commun."],"published-print":{"date-parts":[[2023,6]]},"DOI":"10.1109\/mwc.003.2200469","type":"journal-article","created":{"date-parts":[[2023,7,13]],"date-time":"2023-07-13T17:33:28Z","timestamp":1689269608000},"page":"112-119","source":"Crossref","is-referenced-by-count":17,"title":["Deep Reinforcement Learning Based Task-Oriented Communication in Multi-Agent Systems"],"prefix":"10.1109","volume":"30","author":[{"given":"Guojun","family":"He","sequence":"first","affiliation":[{"name":"Huazhong University of Science and Technology,China"}]},{"given":"Mingjie","family":"Feng","sequence":"additional","affiliation":[{"name":"Huazhong University of Science and Technology,China"}]},{"given":"Yu","family":"Zhang","sequence":"additional","affiliation":[{"name":"Huazhong University of Science and Technology,China"}]},{"given":"Guanghua","family":"Liu","sequence":"additional","affiliation":[{"name":"Huazhong University of Science and Technology,China"}]},{"given":"Yueyue","family":"Dai","sequence":"additional","affiliation":[{"name":"Huazhong University of Science and Technology,China"}]},{"given":"Tao","family":"Jiang","sequence":"additional","affiliation":[{"name":"Huazhong University of Science and Technology,China"}]}],"member":"263","reference":[{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2020.3023541"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2021.3087248"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TVT.2022.3195202"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/MNET.106.2100636"},{"key":"ref11","article-title":"Learning to Schedule Communication in Multi-Agent Reinforcement Learning","author":"kim","year":"0","journal-title":"Proc Int'l Conf Learning Representations"},{"key":"ref10","first-page":"2137","article-title":"Learning to Communicate With Deep Multi-Agent Reinforcement Learning","volume":"29","author":"foerster","year":"0","journal-title":"Proc Neural Information Processing Systems"},{"key":"ref2","author":"shannon","year":"1949","journal-title":"The Mathematical Theory of Communication"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/MCOM.001.2000604"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TCOMM.2022.3182325"},{"key":"ref7","first-page":"6379","article-title":"Multi-Agent Actor-Critic for Mixed Cooperative-Competitive Environments","volume":"30","author":"lowe","year":"0","journal-title":"Proc Neural Information Processing Systems"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/MWC.013.2100642"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/MWC.101.2100269"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICC45855.2022.9838724"},{"key":"ref6","article-title":"Continuous Control With Deep Reinforcement Learning","author":"lillicrap","year":"0","journal-title":"Proc Int'l Conf Learning Representations"},{"key":"ref5","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-Level Control Through Deep Reinforcement Learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"}],"container-title":["IEEE Wireless Communications"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7742\/10183718\/10183796.pdf?arnumber=10183796","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,8,7]],"date-time":"2023-08-07T18:32:15Z","timestamp":1691433135000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10183796\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,6]]},"references-count":15,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/mwc.003.2200469","relation":{},"ISSN":["1536-1284","1558-0687"],"issn-type":[{"value":"1536-1284","type":"print"},{"value":"1558-0687","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,6]]}}}