{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,24]],"date-time":"2025-12-24T12:20:13Z","timestamp":1766578813641,"version":"3.37.3"},"reference-count":17,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,8,10]],"date-time":"2023-08-10T00:00:00Z","timestamp":1691625600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,8,10]],"date-time":"2023-08-10T00:00:00Z","timestamp":1691625600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100004608","name":"Natural Science Foundation of Jiangsu Province","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100004608","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,8,10]]},"DOI":"10.1109\/iccc57788.2023.10233612","type":"proceedings-article","created":{"date-parts":[[2023,9,5]],"date-time":"2023-09-05T17:28:28Z","timestamp":1693934908000},"page":"1-6","source":"Crossref","is-referenced-by-count":1,"title":["Deep Reinforcement Learning for Network Security Applications With A Safety Guide"],"prefix":"10.1109","author":[{"given":"Zhibo","family":"Liu","sequence":"first","affiliation":[{"name":"Nanjing University of Aeronautics and Astronautics,College of Computer Science and Technology,Nanjing,China"}]},{"given":"Xiaozhen","family":"Lu","sequence":"additional","affiliation":[{"name":"Nanjing University of Aeronautics and Astronautics,College of Computer Science and Technology,Nanjing,China"}]},{"given":"Yuhan","family":"Chen","sequence":"additional","affiliation":[{"name":"Nanjing University of Aeronautics and Astronautics,College of Computer Science and Technology,Nanjing,China"}]},{"given":"Yilin","family":"Xiao","sequence":"additional","affiliation":[{"name":"Shenzhen Institute of Artificial Intelligence and Robotics for Society,Shenzhen,China"}]},{"given":"Liang","family":"Xiao","sequence":"additional","affiliation":[{"name":"Xiamen University,Department of Informatics and Communication Engineering,Xiamen,China"}]},{"given":"Yanling","family":"Bu","sequence":"additional","affiliation":[{"name":"Nanjing University of Aeronautics and Astronautics,College of Computer Science and Technology,Nanjing,China"}]}],"member":"263","reference":[{"key":"ref13","first-page":"24 432","article-title":"Model-based safe deep reinforcement learning via a constrained proximal policy optimization algorithm","volume":"35","author":"jayant","year":"2022","journal-title":"Proc Neural Inf Process Syst (NeurIPS)"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i12.17272"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TCOMM.2022.3169813"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/GLOBECOM42002.2020.9348210"},{"key":"ref11","first-page":"12 151","article-title":"Safe reinforcement learning via curriculum induction","author":"turchetta","year":"2020","journal-title":"Proc Neural Inf Process Syst (NeurIPS)"},{"key":"ref10","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"2016","journal-title":"Proc Int Conf Mach Learn (ICML)"},{"article-title":"Proximal policy optimization algorithms","year":"2017","author":"schulman","key":"ref2"},{"key":"ref1","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"journal-title":"Reinforcement Learning An Introduction","year":"2018","author":"sutton","key":"ref17"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/IROS47612.2022.9981366"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TIFS.2022.3149396"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v31i1.10827"},{"article-title":"Continuous control with deep reinforcement learning","year":"2015","author":"lillicrap","key":"ref9"},{"key":"ref4","first-page":"1437","article-title":"A comprehensive survey on safe reinforcement learning","volume":"16","author":"garcía","year":"2015","journal-title":"J Mach Learn Research"},{"key":"ref3","article-title":"DRL-based resource allocation in remote state estimation","author":"pang","year":"2022","journal-title":"IEEE Trans Wireless Commun"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TCOMM.2020.3007742"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682983"}],"event":{"name":"2023 IEEE\/CIC International Conference on Communications in China (ICCC)","start":{"date-parts":[[2023,8,10]]},"location":"Dalian, China","end":{"date-parts":[[2023,8,12]]}},"container-title":["2023 IEEE\/CIC International Conference on Communications in China (ICCC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10233255\/10233122\/10233612.pdf?arnumber=10233612","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,9,25]],"date-time":"2023-09-25T17:57:16Z","timestamp":1695664636000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10233612\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,8,10]]},"references-count":17,"URL":"https:\/\/doi.org\/10.1109\/iccc57788.2023.10233612","relation":{},"subject":[],"published":{"date-parts":[[2023,8,10]]}}}