{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,7,16]],"date-time":"2025-07-16T13:11:19Z","timestamp":1752671479201,"version":"3.37.3"},"reference-count":47,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"8","license":[{"start":{"date-parts":[[2021,8,1]],"date-time":"2021-08-01T00:00:00Z","timestamp":1627776000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,8,1]],"date-time":"2021-08-01T00:00:00Z","timestamp":1627776000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000001","name":"NSF","doi-asserted-by":"publisher","award":["1704662","1704092"],"award-info":[{"award-number":["1704662","1704092"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE J. Select. Areas Commun."],"published-print":{"date-parts":[[2021,8]]},"DOI":"10.1109\/jsac.2021.3087270","type":"journal-article","created":{"date-parts":[[2021,6,14]],"date-time":"2021-06-14T19:49:40Z","timestamp":1623700180000},"page":"2476-2486","source":"Crossref","is-referenced-by-count":10,"title":["PnP-DRL: A Plug-and-Play Deep Reinforcement Learning Approach for Experience-Driven Networking"],"prefix":"10.1109","volume":"39","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-2879-3244","authenticated-orcid":false,"given":"Zhiyuan","family":"Xu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2095-2140","authenticated-orcid":false,"given":"Kun","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Weiyi","family":"Zhang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4418-0114","authenticated-orcid":false,"given":"Jian","family":"Tang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3024-7990","authenticated-orcid":false,"given":"Yanzhi","family":"Wang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5833-8894","authenticated-orcid":false,"given":"Guoliang","family":"Xue","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1145\/3005745.3005750"},{"key":"ref38","first-page":"1995","article-title":"Dueling network architectures for deep reinforcement learning","author":"wang","year":"2016","journal-title":"Proc ICML"},{"key":"ref33","article-title":"D4RL: Datasets for deep data-driven reinforcement learning","author":"fu","year":"2020","journal-title":"arXiv 2004 07219"},{"key":"ref32","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"2016","journal-title":"Proc ICML"},{"key":"ref31","first-page":"417","article-title":"Mahimahi: Accurate record-and-replay for HTTP","author":"netravali","year":"2015","journal-title":"Proc USENIX ATC"},{"key":"ref30","first-page":"731","article-title":"Pantheon: The training ground for Internet congestion-control research","author":"yan","year":"2018","journal-title":"Proc USENIX ATC"},{"key":"ref37","first-page":"2094","article-title":"Deep reinforcement learning with double Q-learning","author":"van hasselt","year":"2016","journal-title":"Proc AAAI"},{"journal-title":"Raw Data&#x2014;Measuring Broadband America","year":"2020","key":"ref36"},{"key":"ref35","first-page":"6818","article-title":"Imitation learning from imperfect demonstration","author":"wu","year":"2019","journal-title":"Proc ICML"},{"key":"ref34","first-page":"7248","article-title":"RL Unplugged: A collection of benchmarks for offline reinforcement learning","author":"gulcehre","year":"2020","journal-title":"Proc NeurIPS"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TCCN.2017.2755007"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM41043.2020.9155250"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/3098822.3098843"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2006.879827"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1145\/2934872.2934898"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/MNET.2017.1700200"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TNET.2013.2281542"},{"journal-title":"How Consumers Judge Their Viewing Experience A 2015 Consumer Survey Report","year":"2020","key":"ref16"},{"key":"ref17","first-page":"2052","article-title":"Off-policy deep reinforcement learning without exploration","author":"fujimoto","year":"2019","journal-title":"Proc ICML"},{"key":"ref18","article-title":"Benchmarking batch deep reinforcement learning algorithms","author":"fujimoto","year":"2019","journal-title":"arXiv 1910 01708"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref28","first-page":"1","article-title":"Rectified linear units improve restricted Boltzmann machines","author":"nair","year":"2010","journal-title":"Proc ICML"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM.2016.7524428"},{"key":"ref27","first-page":"1","article-title":"Automatic differentiation in PyTorch","author":"paszke","year":"2017","journal-title":"Proc NIPS"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-84882-765-3_2"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/2785956.2787486"},{"key":"ref29","first-page":"1","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2015","journal-title":"Proc ICLR"},{"key":"ref5","doi-asserted-by":"crossref","first-page":"1717","DOI":"10.1109\/TNET.2011.2134866","article-title":"Link-state routing with hop-by-hop forwarding can achieve optimal traffic engineering","volume":"19","author":"xu","year":"2011","journal-title":"IEEE\/ACM Trans Netw"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM.2018.8485853"},{"key":"ref7","first-page":"3050","article-title":"A deep reinforcement learning perspective on Internet congestion control","author":"jay","year":"2019","journal-title":"Proc ICML"},{"journal-title":"Open shortest path first (ospf)","year":"2020","key":"ref2"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3152434.3152441"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/2619239.2626296"},{"key":"ref46","article-title":"Way off-policy batch deep reinforcement learning of implicit human preferences in dialog","author":"jaques","year":"2019","journal-title":"arXiv 1907 00456"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.14778\/3199517.3199521"},{"key":"ref45","first-page":"11784","article-title":"Stabilizing off-policy Q-learning via bootstrapping error reduction","author":"kumar","year":"2019","journal-title":"Proc NeurIPS"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-27645-3_2"},{"volume":"7","journal-title":"Cisco Visual Networking Index Global Mobile Data Traffic Forecast Update","year":"2017","key":"ref47"},{"key":"ref21","first-page":"2829","article-title":"Continuous deep Q-learning with model-based acceleration","author":"gu","year":"2019","journal-title":"Proc ICML"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/WCNC.2018.8377343"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1145\/2483977.2483991"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2019.2904358"},{"key":"ref23","article-title":"Offline reinforcement learning: Tutorial, review, and perspectives on open problems","author":"levine","year":"2020","journal-title":"arXiv 2005 01643"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1145\/3300061.3345431"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1186\/s40537-019-0192-5"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TCCN.2018.2809722"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/1943552.1943572"}],"container-title":["IEEE Journal on Selected Areas in Communications"],"original-title":[],"link":[{"URL":"https:\/\/ieeexplore.ieee.org\/ielam\/49\/9486979\/9454317-aam.pdf","content-type":"application\/pdf","content-version":"am","intended-application":"syndication"},{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/49\/9486979\/09454317.pdf?arnumber=9454317","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,4,5]],"date-time":"2022-04-05T19:56:15Z","timestamp":1649188575000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9454317\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,8]]},"references-count":47,"journal-issue":{"issue":"8"},"URL":"https:\/\/doi.org\/10.1109\/jsac.2021.3087270","relation":{},"ISSN":["0733-8716","1558-0008"],"issn-type":[{"type":"print","value":"0733-8716"},{"type":"electronic","value":"1558-0008"}],"subject":[],"published":{"date-parts":[[2021,8]]}}}