{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,7]],"date-time":"2026-03-07T18:09:07Z","timestamp":1772906947522,"version":"3.50.1"},"reference-count":37,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"10","license":[{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Commun."],"published-print":{"date-parts":[[2023,10]]},"DOI":"10.1109\/tcomm.2023.3300331","type":"journal-article","created":{"date-parts":[[2023,8,1]],"date-time":"2023-08-01T18:08:06Z","timestamp":1690913286000},"page":"5893-5903","source":"Crossref","is-referenced-by-count":14,"title":["Distributed-Training-and-Execution Multi-Agent Reinforcement Learning for Power Control in HetNet"],"prefix":"10.1109","volume":"71","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-0719-3912","authenticated-orcid":false,"given":"Kaidi","family":"Xu","sequence":"first","affiliation":[{"name":"Department of Electrical and Electronic Engineering, Imperial College London, SW7 2AZ London, U.K"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7696-0521","authenticated-orcid":false,"given":"Nguyen","family":"Van Huynh","sequence":"additional","affiliation":[{"name":"School of Computing, Engineering and the Built Environment, Edinburgh Napier University (ENU), Edinburgh, U.K"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7894-2415","authenticated-orcid":false,"given":"Geoffrey Ye","family":"Li","sequence":"additional","affiliation":[{"name":"Department of Electrical and Electronic Engineering, Imperial College London, SW7 2AZ London, U.K"}]}],"member":"263","reference":[{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2021.3071480"},{"key":"ref35","first-page":"2681","article-title":"Deep decentralized multi-task multi-agent reinforcement learning under partial observability","author":"omidshafiei","year":"2017","journal-title":"Proc 34th Int Conf Mach Learn (ICML)"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2020.3033334"},{"key":"ref34","year":"2017","journal-title":"Radio frequency (RF) system scenarios"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2019.2933973"},{"key":"ref37","first-page":"6379","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","author":"lowe","year":"2017","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2022.3160697"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TVT.2020.3020400"},{"key":"ref31","first-page":"118","article-title":"Small-macro cell cooperation for HetNet uplink transmission: Spectral efficiency and reliability analyses","volume":"35","author":"al haija","year":"2017","journal-title":"IEEE J Sel Areas Commun"},{"key":"ref30","author":"chew","year":"2016","journal-title":"Potential Game Theory"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TCOMM.2019.2957482"},{"key":"ref33","first-page":"1995","article-title":"Dueling network architectures for deep reinforcement learning","author":"wang","year":"2016","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2018.2866382"},{"key":"ref32","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2015.2461602"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2016.2532458"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2020.3043009"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2019.2933962"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICC.2019.8761431"},{"key":"ref18","article-title":"Deep reinforcement learning for joint spectrum and power allocation in cellular networks","author":"nasir","year":"2020","journal-title":"arXiv 2012 10682"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2022.3170308"},{"key":"ref23","first-page":"1146","article-title":"Stabilising experience replay for deep multi-agent reinforcement learning","author":"foerster","year":"2017","journal-title":"Proc Int Conf Mach Learn (ICML)"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-307-3.50049-6"},{"key":"ref25","article-title":"Deep reinforcement learning from self-play in imperfect-information games","author":"heinrich","year":"2016","journal-title":"arXiv 1603 01121"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TCOMM.2022.3224751"},{"key":"ref22","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015","journal-title":"arXiv 1509 02971"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TVT.2019.2897134"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TCOMM.2020.3004524"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2007.4399095"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2010.2099222"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/MWC.2016.7462490"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2018.2812733"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2013.120713.130548"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2021.3059896"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.23919\/JCC.2019.08.001"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2011.2147784"},{"key":"ref5","doi-asserted-by":"crossref","first-page":"57","DOI":"10.1109\/JSTSP.2007.914876","article-title":"Dynamic spectrum management: Complexity and duality","volume":"2","author":"luo","year":"2008","journal-title":"IEEE J Sel Topics Signal Process"}],"container-title":["IEEE Transactions on Communications"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/26\/10286985\/10198264.pdf?arnumber=10198264","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,11,6]],"date-time":"2023-11-06T19:17:22Z","timestamp":1699298242000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10198264\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10]]},"references-count":37,"journal-issue":{"issue":"10"},"URL":"https:\/\/doi.org\/10.1109\/tcomm.2023.3300331","relation":{},"ISSN":["0090-6778","1558-0857"],"issn-type":[{"value":"0090-6778","type":"print"},{"value":"1558-0857","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,10]]}}}