{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,7,4]],"date-time":"2025-07-04T05:18:11Z","timestamp":1751606291360},"reference-count":11,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,12,1]],"date-time":"2021-12-01T00:00:00Z","timestamp":1638316800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,12,1]],"date-time":"2021-12-01T00:00:00Z","timestamp":1638316800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,12]]},"DOI":"10.1109\/globecom46510.2021.9685914","type":"proceedings-article","created":{"date-parts":[[2022,2,2]],"date-time":"2022-02-02T16:59:04Z","timestamp":1643821144000},"page":"1-6","source":"Crossref","is-referenced-by-count":9,"title":["Multi-agent deep reinforcement learning (MADRL) meets multi-user MIMO systems"],"prefix":"10.1109","author":[{"given":"Heunchul","family":"Lee","sequence":"first","affiliation":[{"name":"Ericsson Research, Ericsson AB,Stockholm,Sweden"}]},{"given":"Jaeseong","family":"Jeong","sequence":"additional","affiliation":[{"name":"Ericsson Research, Ericsson AB,Stockholm,Sweden"}]}],"member":"263","reference":[{"key":"ref4","first-page":"2094","article-title":"Counterfactual multi-agent policy gradients","author":"foerster","year":"0","journal-title":"AAAI Conference on Artificial Intelligence (AAAI)"},{"key":"ref3","first-page":"6382","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environ-ments","author":"lowe","year":"0","journal-title":"Proceedings of the 31st International Conference on Neural Information Processing Systems (NIPS)"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/LCOMM.2010.12.101635"},{"journal-title":"Reinforcement Learning An Introduction","year":"2017","author":"sutton","key":"ref6"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2007.360373"},{"journal-title":"4G LTE-Advanced Pro and The Road to 5G","year":"2016","author":"dahlman","key":"ref5"},{"key":"ref8","article-title":"Deep reinforcement learning approach to MIMO precoding problem: Optimality and Robustness","author":"lee","year":"2020","journal-title":"IEEE Transactions on Wireless Communications"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICC40277.2020.9148742"},{"key":"ref2","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"0","journal-title":"International Conference on Learning Representations (ICLR)"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2008.928095"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/6GSUMMIT49458.2020.9083880"}],"event":{"name":"GLOBECOM 2021 - 2021 IEEE Global Communications Conference","start":{"date-parts":[[2021,12,7]]},"location":"Madrid, Spain","end":{"date-parts":[[2021,12,11]]}},"container-title":["2021 IEEE Global Communications Conference (GLOBECOM)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9685019\/9685006\/09685914.pdf?arnumber=9685914","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,4]],"date-time":"2022-07-04T16:11:06Z","timestamp":1656951066000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9685914\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,12]]},"references-count":11,"URL":"https:\/\/doi.org\/10.1109\/globecom46510.2021.9685914","relation":{},"subject":[],"published":{"date-parts":[[2021,12]]}}}