{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,15]],"date-time":"2026-01-15T01:46:39Z","timestamp":1768441599622,"version":"3.49.0"},"reference-count":47,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"1","license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61772139"],"award-info":[{"award-number":["61772139"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62072117"],"award-info":[{"award-number":["62072117"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Key-Area Research and Development Program of Guangdong Province","award":["2020B010166003"],"award-info":[{"award-number":["2020B010166003"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE J. Select. Areas Commun."],"published-print":{"date-parts":[[2022,1]]},"DOI":"10.1109\/jsac.2021.3126085","type":"journal-article","created":{"date-parts":[[2021,11,10]],"date-time":"2021-11-10T21:41:38Z","timestamp":1636580498000},"page":"376-392","source":"Crossref","is-referenced-by-count":19,"title":["Enabling Robust DRL-Driven Networking Systems via Teacher-Student Learning"],"prefix":"10.1109","volume":"40","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8823-2460","authenticated-orcid":false,"given":"Ying","family":"Zheng","sequence":"first","affiliation":[]},{"given":"Lixiang","family":"Lin","sequence":"additional","affiliation":[]},{"given":"Tianqi","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Haoyu","family":"Chen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9268-1979","authenticated-orcid":false,"given":"Qingyang","family":"Duan","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4168-3998","authenticated-orcid":false,"given":"Yuedong","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Xin","family":"Wang","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1038\/nature24270"},{"key":"ref2","article-title":"Playing atari with deep reinforcement learning","author":"Mnih","year":"2013","journal-title":"arXiv:1312.5602"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/3005745.3005750"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/3341302.3342080"},{"key":"ref5","first-page":"3050","article-title":"A deep reinforcement learning perspective on internet congestion control","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","author":"Jay"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/3098822.3098843"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM41043.2020.9155492"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM41043.2020.9155411"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3240508.3240545"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/3152434.3152441"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.23919\/WiOPT47501.2019.9144110"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/3232565.3232569"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1145\/2408776.2408794"},{"key":"ref14","article-title":"Variance reduction for reinforcement learning in input-driven environments","author":"Mao","year":"2018","journal-title":"arXiv:1807.02264"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P16-1228"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1145\/2619239.2626296"},{"key":"ref17","first-page":"20","article-title":"BBR: Congestion-based congestion control","volume":"14","author":"Cardwell","year":"2016","journal-title":"Netw. Congestion"},{"key":"ref18","volume-title":"Reinforcement Learning: An Introduction","author":"Sutton","year":"2018"},{"key":"ref19","first-page":"1057","article-title":"Policy gradient methods for reinforcement learning with function approximation","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NIPS)","author":"Sutton"},{"key":"ref20","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv:1707.06347"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10295"},{"key":"ref22","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","author":"Mnih"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1145\/1400097.1400105"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1145\/3341216.3342218"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM41043.2020.9155250"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TNET.2020.3048666"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1145\/3387514.3405859"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1145\/3387514.3405892"},{"key":"ref29","article-title":"Towards safe online reinforcement learning in computer systems","volume-title":"Proc. NeurIPS Mach. Learn. Syst. Workshop","author":"Mao"},{"issue":"1","key":"ref30","first-page":"1437","article-title":"A comprehensive survey on safe reinforcement learning","volume":"16","author":"Garc\u00eda","year":"2015","journal-title":"J. Mach. Learn. Res."},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1287\/mnsc.18.7.356"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1137\/0325004"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2017.11.012"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1201\/9781315140223"},{"key":"ref35","article-title":"Policy gradients with variance related risk criteria","author":"Di Castro","year":"2012","journal-title":"arXiv:1206.6404"},{"key":"ref36","first-page":"23","article-title":"Apprenticeship learning for initial value functions in reinforcement learning","author":"Maire","year":"2005","journal-title":"Planning and Learning in a Priori Unknown or Dynamic Domains"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1007\/s12555-012-0119-9"},{"key":"ref38","volume-title":"On Integrating Apprentice Learning and Reinforcement Learning","author":"Clouse","year":"1996"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1613\/jair.3761"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1007\/BF02888435"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.13001\/1081-3810.1122"},{"key":"ref42","first-page":"763","article-title":"Knowledge revision for reinforcement learning with abstract MDPs","volume-title":"Proc. Int. Conf. Auto. Agents Multiagent Syst. (AAMAS)","author":"Efthymiadis"},{"key":"ref43","first-page":"278","article-title":"Policy invariance under reward transformations: Theory and application to reward shaping","volume-title":"Proc. ICML","volume":"99","author":"Ng"},{"key":"ref44","article-title":"Prioritized experience replay","author":"Schaul","year":"2015","journal-title":"arXiv:1511.05952"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1145\/2785956.2787486"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/TNET.2020.2996964"},{"issue":"14","key":"ref47","first-page":"527","article-title":"Network simulations with the ns-3 simulator","volume":"14","author":"Henderson","year":"2008","journal-title":"SIGCOMM Demonstration"}],"container-title":["IEEE Journal on Selected Areas in Communications"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/49\/9653871\/09611275.pdf?arnumber=9611275","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,12]],"date-time":"2024-01-12T01:54:37Z","timestamp":1705024477000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9611275\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,1]]},"references-count":47,"journal-issue":{"issue":"1"},"URL":"https:\/\/doi.org\/10.1109\/jsac.2021.3126085","relation":{},"ISSN":["0733-8716","1558-0008"],"issn-type":[{"value":"0733-8716","type":"print"},{"value":"1558-0008","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,1]]}}}