{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T15:43:56Z","timestamp":1775144636637,"version":"3.50.1"},"reference-count":38,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"1","license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100000266","name":"Engineering and Physical Sciences Research Council (EPSRC), U.K","doi-asserted-by":"publisher","award":["EP\/W004348\/1"],"award-info":[{"award-number":["EP\/W004348\/1"]}],"id":[{"id":"10.13039\/501100000266","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Commun."],"published-print":{"date-parts":[[2023,1]]},"DOI":"10.1109\/tcomm.2022.3220870","type":"journal-article","created":{"date-parts":[[2022,11,9]],"date-time":"2022-11-09T20:38:27Z","timestamp":1668026307000},"page":"101-114","source":"Crossref","is-referenced-by-count":28,"title":["Scalable Multi-Agent Reinforcement Learning for Dynamic Coordinated Multipoint Clustering"],"prefix":"10.1109","volume":"71","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-7874-4614","authenticated-orcid":false,"given":"Fenghe","family":"Hu","sequence":"first","affiliation":[{"name":"Engineering, King&#x2019;s College London, London, U.K"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1001-7036","authenticated-orcid":false,"given":"Yansha","family":"Deng","sequence":"additional","affiliation":[{"name":"Engineering, King&#x2019;s College London, London, U.K"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9935-0435","authenticated-orcid":false,"given":"A.","family":"Hamid Aghvami","sequence":"additional","affiliation":[{"name":"Engineering, King&#x2019;s College London, London, U.K"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/LWC.2014.2340405"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2017.2662212"},{"key":"ref3","first-page":"757","article-title":"A novel static clustering approach for CoMP","volume-title":"Proc. 7th Int. Conf. Comput. Converg. Technol. 
(ICCCT)","author":"Ali"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/PIMRC.2011.6139718"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2019.2933973"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2020.3036965"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TCOMM.2020.3017281"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2020.3036962"},{"key":"ref9","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","author":"Lowe","year":"2017","journal-title":"arXiv:1706.02275"},{"key":"ref10","article-title":"Opponent modeling in deep reinforcement learning","author":"He","year":"2016","journal-title":"arXiv:1609.05559"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2021.3063822"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2020.3018825"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2021.3088689"},{"key":"ref14","article-title":"QMIX: Monotonic value function factorisation for deep multi-agent reinforcement learning","author":"Rashid","year":"2018","journal-title":"arXiv:1803.11485"},{"key":"ref15","article-title":"Trust region policy optimization","author":"Schulman","year":"2015","journal-title":"arXiv:1502.05477"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1613\/jair.1.11396"},{"key":"ref17","article-title":"Mean field multi-agent reinforcement learning","author":"Yang","year":"2018","journal-title":"arXiv:1802.05438"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2019.2904352"},{"key":"ref19","article-title":"Off-policy multi-agent decomposed policy gradients","author":"Wang","year":"2020","journal-title":"arXiv:2007.12322"},{"key":"ref20","article-title":"Ecological reinforcement learning","author":"Co-Reyes","year":"2020","journal-title":"arXiv:2006.12478"},{"key":"ref21","article-title":"Discounted reinforcement learning is not an optimization problem","author":"Naik","year":"2019","journal-title":"arXiv:1910.02140"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/MNET.2019.1800464"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/PIMRC.2018.8580792"},{"key":"ref24","article-title":"Generalized coordinated multipoint framework for 5G and beyond","author":"Sohaib J. 
Solaija","year":"2020","journal-title":"arXiv:2008.06343"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1631\/fitee.1900661"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2009.07.008"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2019.2942929"},{"key":"ref28","article-title":"A distributional perspective on reinforcement learning","author":"Bellemare","year":"2017","journal-title":"arXiv:1707.06887"},{"key":"ref29","article-title":"Correlation alignment for unsupervised domain adaptation","author":"Sun","year":"2016","journal-title":"arXiv:1612.01939"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1214\/105051604000000116"},{"key":"ref31","article-title":"Actor-critic algorithms for learning Nash equilibria in N-player general-sum games","author":"Prasad","year":"2014","journal-title":"arXiv:1401.2086"},{"key":"ref32","article-title":"Actor-critic algorithms for constrained multi-agent reinforcement learning","author":"Diddigi","year":"2019","journal-title":"arXiv:1905.02907"},{"key":"ref33","article-title":"Parameter sharing for heterogeneous agents in multi-agent reinforcement learning","author":"Terry","year":"2020","journal-title":"arXiv:2005.13625"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.23919\/CCC55666.2022.9902315"},{"key":"ref35","first-page":"230","article-title":"CoCoA: A general framework for communication-efficient distributed optimization","volume":"18","author":"Smith","year":"2018","journal-title":"J. Mach. Learn. Res."},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11796"},{"key":"ref37","article-title":"Continuous control with deep reinforcement learning","author":"Lillicrap","year":"2015","journal-title":"arXiv:1509.02971"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-71682-4_5"}],"container-title":["IEEE Transactions on Communications"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/26\/10016736\/09942825.pdf?arnumber=9942825","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,1]],"date-time":"2024-02-01T02:02:53Z","timestamp":1706752973000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9942825\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,1]]},"references-count":38,"journal-issue":{"issue":"1"},"URL":"https:\/\/doi.org\/10.1109\/tcomm.2022.3220870","relation":{},"ISSN":["0090-6778","1558-0857"],"issn-type":[{"value":"0090-6778","type":"print"},{"value":"1558-0857","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,1]]}}}