{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,17]],"date-time":"2025-10-17T14:23:22Z","timestamp":1760711002752,"version":"3.28.0"},"reference-count":12,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,5,16]],"date-time":"2022-05-16T00:00:00Z","timestamp":1652659200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,5,16]],"date-time":"2022-05-16T00:00:00Z","timestamp":1652659200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,5,16]]},"DOI":"10.1109\/icc45855.2022.9838493","type":"proceedings-article","created":{"date-parts":[[2022,8,11]],"date-time":"2022-08-11T15:37:11Z","timestamp":1660232231000},"page":"5178-5183","source":"Crossref","is-referenced-by-count":5,"title":["Throughput Optimization for SGF-NOMA via Distributed DRL with Prioritized Experience Replay"],"prefix":"10.1109","author":[{"given":"Muhammad","family":"Fayaz","sequence":"first","affiliation":[{"name":"Queen Mary University of London,London,UK"}]},{"given":"Wenqiang","family":"Yi","sequence":"additional","affiliation":[{"name":"Queen Mary University of London,London,UK"}]},{"given":"Yuanwei","family":"Liu","sequence":"additional","affiliation":[{"name":"Queen Mary University of London,London,UK"}]},{"given":"Arumugam","family":"Nallanathan","sequence":"additional","affiliation":[{"name":"Queen Mary University of London,London,UK"}]}],"member":"263","reference":[{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2021.3086762"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2017.2766778"},{"key":"ref10","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/j.comnet.2020.107646"},{"article-title":"Deep reinforcement learning with double q-learning","year":"2015","author":"van hasselt","key":"ref11"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TCOMM.2019.2903443"},{"article-title":"Prioritized experience replay","year":"2015","author":"schaul","key":"ref12"},{"article-title":"Distributed resource scheduling for large-scale MEC systems: A multi-agent ensemble deep reinforcement learning with imitation acceleration","year":"2020","author":"jiang","key":"ref8"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2017.2768666"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2020.3013514"},{"journal-title":"Reinforcement Learning An Introduction","year":"2018","author":"sutton","key":"ref9"},{"key":"ref1","first-page":"1","article-title":"Massive access for 5G and beyond","author":"chen","year":"2020","journal-title":"IEEE J Sel Areas Commun"}],"event":{"name":"ICC 2022 - IEEE International Conference on Communications","start":{"date-parts":[[2022,5,16]]},"location":"Seoul, Korea, Republic of","end":{"date-parts":[[2022,5,20]]}},"container-title":["ICC 2022 - IEEE International Conference on Communications"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9837954\/9838246\/09838493.pdf?arnumber=9838493","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,9,5]],"date-time":"2022-09-05T16:29:40Z","timestamp":1662395380000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9838493\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,5,16]]},"references-count":12,"URL":"https:\/\/doi.org\/10.1109\/icc45855.2022.9838493","relation":{},"subject":[],"published":{"date-parts":[[2022,5,16]]}}}