{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,17]],"date-time":"2026-03-17T18:48:52Z","timestamp":1773773332160,"version":"3.50.1"},"reference-count":32,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2020,9,1]],"date-time":"2020-09-01T00:00:00Z","timestamp":1598918400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,9,1]],"date-time":"2020-09-01T00:00:00Z","timestamp":1598918400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,9,1]],"date-time":"2020-09-01T00:00:00Z","timestamp":1598918400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Cogn. Commun. Netw."],"published-print":{"date-parts":[[2020,9]]},"DOI":"10.1109\/tccn.2020.2992628","type":"journal-article","created":{"date-parts":[[2020,5,5]],"date-time":"2020-05-05T19:52:57Z","timestamp":1588708377000},"page":"970-979","source":"Crossref","is-referenced-by-count":102,"title":["Deep Reinforcement Learning-Based Spectrum Allocation in Integrated Access and Backhaul Networks"],"prefix":"10.1109","volume":"6","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-9878-3722","authenticated-orcid":false,"given":"Wanlu","family":"Lei","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4686-0973","authenticated-orcid":false,"given":"Yu","family":"Ye","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5407-0835","authenticated-orcid":false,"given":"Ming","family":"Xiao","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2016.7798980"},{"key":"ref31","first-page":"56","article-title":"Radio frequency (RF) system scenarios","volume":"8","author":"access","year":"2011","journal-title":"Release"},{"key":"ref30","first-page":"387","article-title":"Deterministic policy gradient algorithms","author":"silver","year":"2014","journal-title":"Proc 31st Int Conf Mach Learn"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2020.2965443"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CC.2016.7563684"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1038\/nature16961"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992698"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICC.2017.7997440"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/VTCFall.2018.8690757"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2019.2912754"},{"key":"ref17","author":"lillicrap","year":"2015","journal-title":"Continuous control with deep reinforcement learning"},{"key":"ref18","author":"dulac-arnold","year":"2015","journal-title":"Deep reinforcement learning in large discrete action spaces"},{"key":"ref19","doi-asserted-by":"crossref","first-page":"57","DOI":"10.1109\/JSTSP.2007.914876","article-title":"Dynamic spectrum management: Complexity and duality","volume":"2","author":"luo","year":"2008","journal-title":"IEEE J Sel Topics Signal Process"},{"key":"ref28","first-page":"1057","article-title":"Policy gradient methods for reinforcement learning with function approximation","author":"sutton","year":"2000","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICC.2018.8422149"},{"key":"ref27","author":"wang","year":"2015","journal-title":"Dueling network architectures for deep reinforcement learning"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2018.2874655"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/GLOCOM.2018.8647977"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ACC.2012.6315022"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CAMAD.2018.8514996"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2018.2868046"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TCOMM.2009.10.080529"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/6.938713"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/MNET.001.1900188"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/VTCFall.2019.8891507"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9781139565844"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1613\/jair.301"},{"key":"ref21","volume":"2","author":"sutton","year":"1998","journal-title":"Introduction to Reinforcement Learning"},{"key":"ref24","author":"mnih","year":"2013","journal-title":"Playing atari with deep reinforcement learning"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1007\/BF00993306"},{"key":"ref26","author":"schaul","year":"2015","journal-title":"Prioritized experience replay"},{"key":"ref25","author":"van hasselt","year":"2015","journal-title":"Deep reinforcement learning with double q-learning"}],"container-title":["IEEE Transactions on Cognitive Communications and Networking"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6687307\/9188038\/09086877.pdf?arnumber=9086877","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T17:31:48Z","timestamp":1651080708000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9086877\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,9]]},"references-count":32,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/tccn.2020.2992628","relation":{},"ISSN":["2332-7731","2372-2045"],"issn-type":[{"value":"2332-7731","type":"electronic"},{"value":"2372-2045","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020,9]]}}}