{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,21]],"date-time":"2026-03-21T19:28:03Z","timestamp":1774121283032,"version":"3.50.1"},"reference-count":54,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2024,3,1]],"date-time":"2024-03-01T00:00:00Z","timestamp":1709251200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,3,1]],"date-time":"2024-03-01T00:00:00Z","timestamp":1709251200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,3,1]],"date-time":"2024-03-01T00:00:00Z","timestamp":1709251200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100007053","name":"Korea Institute of Energy Technology Evaluation and Planning (KETEP) and the Ministry of Trade, Industry and Energy (MOTIE) of the Republic of Korea","doi-asserted-by":"publisher","award":["RS-2023-00235742"],"award-info":[{"award-number":["RS-2023-00235742"]}],"id":[{"id":"10.13039\/501100007053","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003725","name":"National Research Foundation of Korea (NRF) Grant funded by the Ministry of Science and ICT, South Korea","doi-asserted-by":"publisher","award":["NRF-2021R1A2C2094350"],"award-info":[{"award-number":["NRF-2021R1A2C2094350"]}],"id":[{"id":"10.13039\/501100003725","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Institute of Information and Communications Technology Planning and Evaluation (IITP) Grant funded by the Korea Government","award":["2020-0-01373"],"award-info":[{"award-number":["2020-0-01373"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Smart Grid"],"published-print":{"date-parts":[[2024,3]]},"DOI":"10.1109\/tsg.2023.3302846","type":"journal-article","created":{"date-parts":[[2023,8,8]],"date-time":"2023-08-08T17:34:31Z","timestamp":1691516071000},"page":"1423-1437","source":"Crossref","is-referenced-by-count":17,"title":["Deep Reinforcement Learning-Based Active Network Management and Emergency Load-Shedding Control for Power Systems"],"prefix":"10.1109","volume":"15","author":[{"given":"Haotian","family":"Zhang","sequence":"first","affiliation":[{"name":"Department of Electrical Engineering, Hanyang University, Seoul, South Korea"}]},{"given":"Xinfeng","family":"Sun","sequence":"additional","affiliation":[{"name":"Department of Electrical Engineering, Hanyang University, Seoul, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5105-4805","authenticated-orcid":false,"given":"Myoung Hoon","family":"Lee","sequence":"additional","affiliation":[{"name":"Department of Electrical Engineering, Incheon National University, Incheon, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8877-9519","authenticated-orcid":false,"given":"Jun","family":"Moon","sequence":"additional","affiliation":[{"name":"Department of Electrical Engineering, Hanyang University, Seoul, South Korea"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/SURV.2011.101911.00087"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2005.857486"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.3390\/en12040682"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/j.egyai.2021.100092"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2019.2933191"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/j.epsr.2011.04.003"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2015.2410171"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1049\/ip-gtd:20020218"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/s11081-016-9339-9"},{"key":"ref10","article-title":"Accelerated deep reinforcement learning based load shedding for emergency voltage control","author":"Huang","year":"2020","journal-title":"arXiv:2006.12667"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2021.118221"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-29724-3_10"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.17775\/CSEEJPES.2019.00920"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.35833\/MPCE.2020.000552"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.arcontrol.2020.03.001"},{"key":"ref17","article-title":"Boosting soft actor-critic: Emphasizing recent experience without forgetting the past","author":"Wang","year":"2019","journal-title":"arXiv:1906.04009"},{"key":"ref18","volume-title":"Pytorch implementation of soft-actor-critic-and-extensions","author":"Dittert","year":"2020"},{"key":"ref19","article-title":"Prioritized experience replay","author":"Schaul","year":"2015","journal-title":"arXiv:1511.05952"},{"key":"ref20","first-page":"13644","article-title":"Constrained variational policy optimization for safe reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Liu"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/UPEC.2015.7339816"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/PTC.2019.8810698"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2020.3010876"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2931955"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1049\/gtd2.12795"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.17775\/CSEEJPES.2020.06120"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3121757"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1016\/j.icte.2022.06.004"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ROBIO.2018.8665248"},{"key":"ref30","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja"},{"key":"ref31","article-title":"Continuous control with deep reinforcement learning","author":"Lillicrap","year":"2015","journal-title":"arXiv:1509.02971"},{"key":"ref32","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv:1707.06347"},{"key":"ref33","article-title":"Maximum a posteriori policy optimisation","author":"Abdolmaleki","year":"2018","journal-title":"arXiv:1806.06920"},{"key":"ref34","article-title":"Reinforcement learning and control as probabilistic inference: Tutorial and review","author":"Levine","year":"2018","journal-title":"arXiv:1805.00909"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2020.3008364"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2020.3010130"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2019.2941498"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2020.3034827"},{"key":"ref39","article-title":"Openai gym","author":"Brockman","year":"2016","journal-title":"arXiv:1606.01540"},{"issue":"1","key":"ref40","first-page":"1437","article-title":"A comprehensive survey on safe reinforcement learning","volume":"16","author":"Garc\u0131a","year":"2015","journal-title":"J. Mach. Learn. Res."},{"key":"ref41","first-page":"22","article-title":"Constrained policy optimization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Achiam"},{"key":"ref42","article-title":"Reward constrained policy optimization","author":"Tessler","year":"2018","journal-title":"arXiv:1805.11074"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2021.118403"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/tpwrs.2023.3237888"},{"key":"ref45","volume-title":"Reinforcement Learning: An Introduction","author":"Sutton","year":"2018"},{"key":"ref46","article-title":"MPC-based realtime power system control with DNN-based prediction\/sensitivity-estimation","author":"Hossain","year":"2021","journal-title":"arXiv:2106.02794"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/TPAS.1968.292194"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/61.127040"},{"key":"ref49","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Mnih"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1812.05905"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1016\/j.icte.2022.06.004"},{"key":"ref52","first-page":"8103","article-title":"A Lyapunov-based approach to safe reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"31","author":"Chow"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/TPAS.1979.319407"},{"issue":"2","key":"ref54","doi-asserted-by":"crossref","DOI":"10.1088\/1751-8121\/ab5d4d","article-title":"Quantum fisher information matrix and multiparameter estimation","volume":"53","author":"Liu","year":"2019","journal-title":"J. Phys. A Math. Theor."}],"container-title":["IEEE Transactions on Smart Grid"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5165411\/10440375\/10210687.pdf?arnumber=10210687","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,1]],"date-time":"2024-09-01T04:37:17Z","timestamp":1725165437000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10210687\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,3]]},"references-count":54,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/tsg.2023.3302846","relation":{},"ISSN":["1949-3053","1949-3061"],"issn-type":[{"value":"1949-3053","type":"print"},{"value":"1949-3061","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,3]]}}}