{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,18]],"date-time":"2026-03-18T14:21:37Z","timestamp":1773843697498,"version":"3.50.1"},"reference-count":25,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2019,1,1]],"date-time":"2019-01-01T00:00:00Z","timestamp":1546300800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["91738202"],"award-info":[{"award-number":["91738202"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61790553"],"award-info":[{"award-number":["61790553"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003999","name":"Science Fund for Creative Research Groups","doi-asserted-by":"publisher","award":["61321061"],"award-info":[{"award-number":["61321061"]}],"id":[{"id":"10.13039\/501100003999","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Shenzhen Science and Technology Plan Projects","award":["JCYJ20180306170614484"],"award-info":[{"award-number":["JCYJ20180306170614484"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2019]]},"DOI":"10.1109\/access.2019.2937108","type":"journal-article","created":{"date-parts":[[2019,8,23]],"date-time":"2019-08-23T19:59:08Z","timestamp":1566590348000},"page":"118898-118906","source":"Crossref","is-referenced-by-count":37,"title":["Multi-Agent Deep Reinforcement Learning-Based Cooperative Spectrum Sensing With Upper Confidence Bound Exploration"],"prefix":"10.1109","volume":"7","author":[{"given":"Yu","family":"Zhang","sequence":"first","affiliation":[{"name":"Department of Electronic Engineering, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0718-5496","authenticated-orcid":false,"given":"Peixiang","family":"Cai","sequence":"additional","affiliation":[{"name":"Department of Electronic Engineering, Tsinghua University, Beijing, China"}]},{"given":"Changyong","family":"Pan","sequence":"additional","affiliation":[{"name":"Department of Electronic Engineering, Tsinghua University, Beijing, China"}]},{"given":"Subing","family":"Zhang","sequence":"additional","affiliation":[{"name":"China Electronics Standardization Institute, Beijing, China"}]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2018.2801833"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1002\/ett.2803"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2018.2853988"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/COGART.2009.5167238"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICCC.2015.7432933"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/s11276-012-0530-4"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/VETECF.2010.5594301"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/WCNC.2017.7925694"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/DYSPAN.2011.5936261"},{"key":"ref19","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/SURV.2009.090109"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/98.788210"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-00951-3_20"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICC.2006.254957"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/DYSPAN.2005.1542628"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CROWNCOM.2008.4562451"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CROWNCOM.2007.4549771"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2017.2761910"},{"key":"ref1","article-title":"Spectrum policy task force","author":"kolodzy","year":"2002"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TCCN.2018.2809722"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952524"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2018.2831240"},{"key":"ref24","first-page":"242","article-title":"Multiagent reinforcement learning: Theoretical framework and an algorithm","volume":"98","author":"hu","year":"1998","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref23","first-page":"4868","article-title":"Is q-learning provably efficient?","author":"jin","year":"2018","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992698"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/8600701\/08811461.pdf?arnumber=8811461","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,9,8]],"date-time":"2022-09-08T19:53:13Z","timestamp":1662666793000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8811461\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019]]},"references-count":25,"URL":"https:\/\/doi.org\/10.1109\/access.2019.2937108","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2019]]}}}