{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,7]],"date-time":"2024-09-07T11:48:21Z","timestamp":1725709701664},"reference-count":14,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,6,1]],"date-time":"2021-06-01T00:00:00Z","timestamp":1622505600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,6,1]],"date-time":"2021-06-01T00:00:00Z","timestamp":1622505600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,6]]},"DOI":"10.1109\/icc42927.2021.9500849","type":"proceedings-article","created":{"date-parts":[[2021,8,6]],"date-time":"2021-08-06T16:49:21Z","timestamp":1628268561000},"page":"1-6","source":"Crossref","is-referenced-by-count":3,"title":["Transmit Power Pool Design for Uplink IoT Networks with Grant-free NOMA"],"prefix":"10.1109","author":[{"given":"Muhammad","family":"Fayaz","sequence":"first","affiliation":[{"name":"Queen Mary University of London,London,UK"}]},{"given":"Wenqiang","family":"Yi","sequence":"additional","affiliation":[{"name":"Queen Mary University of London,London,UK"}]},{"given":"Yuanwei","family":"Liu","sequence":"additional","affiliation":[{"name":"Queen Mary University of London,London,UK"}]},{"given":"Arumugam","family":"Nallanathan","sequence":"additional","affiliation":[{"name":"Queen Mary University of London,London,UK"}]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2019.2933962"},{"journal-title":"Reinforcement Learning An Introduction","year":"2018","author":"sutton","key":"ref11"},{"key":"ref12","article-title":"From single-agent to multi-agent reinforcement learning: Foundational concepts and methods","author":"neto","year":"2005","journal-title":"Learning theory course"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2930115"},{"key":"ref14","article-title":"Deep reinforcement learning with double q-learning","author":"van hasselt","year":"2015","journal-title":"arXiv preprint arXiv 1509 06461"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2020.2972274"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TCOMM.2019.2897632"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2017.2687218"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2020.2996032"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2017.2768666"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TCOMM.2019.2945332"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TCOMM.2018.2881120"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2019.2916177"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"}],"event":{"name":"ICC 2021 - IEEE International Conference on Communications","start":{"date-parts":[[2021,6,14]]},"location":"Montreal, QC, Canada","end":{"date-parts":[[2021,6,23]]}},"container-title":["ICC 2021 - IEEE International Conference on Communications"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9500243\/9500244\/09500849.pdf?arnumber=9500849","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,2]],"date-time":"2022-08-02T20:07:54Z","timestamp":1659470874000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9500849\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,6]]},"references-count":14,"URL":"https:\/\/doi.org\/10.1109\/icc42927.2021.9500849","relation":{},"subject":[],"published":{"date-parts":[[2021,6]]}}}