{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T07:18:14Z","timestamp":1772349494104,"version":"3.50.1"},"reference-count":14,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,12,1]],"date-time":"2021-12-01T00:00:00Z","timestamp":1638316800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,12,1]],"date-time":"2021-12-01T00:00:00Z","timestamp":1638316800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,12]]},"DOI":"10.1109\/globecom46510.2021.9685621","type":"proceedings-article","created":{"date-parts":[[2022,2,2]],"date-time":"2022-02-02T16:59:04Z","timestamp":1643821144000},"page":"1-6","source":"Crossref","is-referenced-by-count":4,"title":["Reliable Reinforcement Learning Based NOMA Schemes for URLLC"],"prefix":"10.1109","author":[{"given":"Waleed","family":"Ahsan","sequence":"first","affiliation":[{"name":"Queen Mary University of London,London,UK"}]},{"given":"Wenqiang","family":"Yi","sequence":"additional","affiliation":[{"name":"Queen Mary University of London,London,UK"}]},{"given":"Yuanwei","family":"Liu","sequence":"additional","affiliation":[{"name":"Queen Mary University of London,London,UK"}]},{"given":"Arumugam","family":"Nallanathan","sequence":"additional","affiliation":[{"name":"Queen Mary University of London,London,UK"}]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2018.2796542"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2016.2549378"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/LWC.2019.2908912"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.2014.2318726"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2021.3065523"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.2010.2043769"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/MCOM.2017.1601092"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2019.2898785"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2019.2957745"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2019.2963185"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2020.2992786"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TCOMM.2019.2897632"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2020.2977845"},{"key":"ref9","article-title":"Understanding multi-step deep reinforcement learning: A systematic study of the DQN target","author":"hernandez-garcia","year":"2019","journal-title":"ArXiv Preprint"}],"event":{"name":"GLOBECOM 2021 - 2021 IEEE Global Communications Conference","location":"Madrid, Spain","start":{"date-parts":[[2021,12,7]]},"end":{"date-parts":[[2021,12,11]]}},"container-title":["2021 IEEE Global Communications Conference (GLOBECOM)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9685019\/9685006\/09685621.pdf?arnumber=9685621","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,6]],"date-time":"2022-06-06T16:23:56Z","timestamp":1654532636000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9685621\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,12]]},"references-count":14,"URL":"https:\/\/doi.org\/10.1109\/globecom46510.2021.9685621","relation":{},"subject":[],"published":{"date-parts":[[2021,12]]}}}