{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,27]],"date-time":"2026-03-27T05:55:20Z","timestamp":1774590920492,"version":"3.50.1"},"reference-count":49,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","funder":[{"DOI":"10.13039\/501100001381","name":"Singapore National Research Foundation, through the National Cybersecurity Research and Development Program","doi-asserted-by":"publisher","award":["NRF2018NCR-NCR005-0001"],"award-info":[{"award-number":["NRF2018NCR-NCR005-0001"]}],"id":[{"id":"10.13039\/501100001381","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001381","name":"Singapore National Research Foundation","doi-asserted-by":"publisher","award":["NCR NRF2018NCR-NSOE003-0001"],"award-info":[{"award-number":["NCR NRF2018NCR-NSOE003-0001"]}],"id":[{"id":"10.13039\/501100001381","id-type":"DOI","asserted-by":"publisher"}]},{"name":"NRF Investigatorship","award":["NRFI06-2020-0022"],"award-info":[{"award-number":["NRFI06-2020-0022"]}]},{"name":"NRF Investigatorship","award":["NRFNRFI06-2020-0001"],"award-info":[{"award-number":["NRFNRFI06-2020-0001"]}]},{"name":"Ministry of Education, Republic of Singapore","award":["AcRF TIER 1 2017-T1-001-228 (RG92\/17)"],"award-info":[{"award-number":["AcRF TIER 1 2017-T1-001-228 (RG92\/17)"]}]},{"DOI":"10.13039\/501100001475","name":"Nanyang Assistant Professorship from Nanyang Technological University, Singapore","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001475","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Smart Grid"],"published-print":{"date-parts":[[2021,7]]},"DOI":"10.1109\/tsg.2021.3062700","type":"journal-article","created":{"date-parts":[[2021,3,1]],"date-time":"2021-03-01T21:50:53Z","timestamp":1614635453000},"page":"3613-3623","source":"Crossref","is-referenced-by-count":56,"title":["Vulnerability Assessment of Deep Reinforcement Learning Models for Power System Topology Optimization"],"prefix":"10.1109","volume":"12","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-2741-058X","authenticated-orcid":false,"given":"Yan","family":"Zheng","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5303-4138","authenticated-orcid":false,"given":"Ziming","family":"Yan","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5099-7054","authenticated-orcid":false,"given":"Kangjie","family":"Chen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8648-527X","authenticated-orcid":false,"given":"Jianwen","family":"Sun","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0503-183X","authenticated-orcid":false,"given":"Yan","family":"Xu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7300-9215","authenticated-orcid":false,"given":"Yang","family":"Liu","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/89"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/89"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2017\/525"},{"key":"ref32","author":"zhang","year":"2018","journal-title":"A study on overfitting in deep reinforcement learning"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TPWRS.2014.2379112"},{"key":"ref30","author":"seo","year":"2018","journal-title":"Noise-adding methods of saliency map as series of higher order partial derivative"},{"key":"ref37","author":"kos","year":"2017","journal-title":"Delving into adversarial attacks on deep policies"},{"key":"ref36","author":"pattanaik","year":"2017","journal-title":"Robust deep reinforcement learning with adversarial attacks"},{"key":"ref35","author":"behzadan","year":"2017","journal-title":"Whatever does not kill deep reinforcement learning makes it stronger"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-62416-7_19"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/EuroSP.2016.36"},{"key":"ref27","author":"hussenot","year":"2019","journal-title":"CopyCAT Taking control of neural policies with constant attacks"},{"key":"ref29","author":"huang","year":"2017","journal-title":"Adversarial attacks on neural network policies"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.17775\/CSEEJPES.2018.00520"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2010.2046346"},{"key":"ref20","author":"marot","year":"2019","journal-title":"Learning to run a power network challenge for training topology controllers"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TPWRS.2016.2631891"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1049\/iet-cps.2016.0019"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2015.2388545"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2017.2709252"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.282"},{"key":"ref25","author":"goodfellow","year":"2014","journal-title":"Explaining and Harnessing Adversarial Examples"},{"key":"ref10","author":"lillicrap","year":"2015","journal-title":"Continuous control with deep reinforcement learning"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2018.2879572"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i01.5439"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2019.2955437"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2019.2903756"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2020.2971427"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TPWRS.2018.2881359"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TPWRS.2019.2941134"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2019.2962625"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2019.2933191"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2020.3005270"},{"key":"ref4","year":"2020","journal-title":"Geirina Team Deep Reinforcement Learning to Run Power Networks With Deep Q Networks GEIRI North America"},{"key":"ref3","year":"2020","journal-title":"Apogee Project Team Learning to Run a Power Network Challenge R&#x00E9;seau de Transport d&#x2019;&#x00C9;lectricit&#x00E9;"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2015.2495133"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2017.2653219"},{"key":"ref8","doi-asserted-by":"crossref","first-page":"262","DOI":"10.1007\/978-3-319-62416-7_19","article-title":"Vulnerability of deep reinforcement learning to policy induction attacks","author":"behzadan","year":"2017","journal-title":"Proc Int Conf Mach Learn Data Min Pattern Recognit"},{"key":"ref7","author":"szegedy","year":"2013","journal-title":"Intriguing properties of neural networks"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1007\/s11390-020-9967-6"},{"key":"ref9","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ASE.2019.00077"},{"key":"ref45","first-page":"2282","article-title":"Bayes-ToMoP: A fast detection and best response algorithm towards sophisticated opponents","author":"yang","year":"2019","journal-title":"Proc 18th Int Conf Auton Agents MultiAgent Syst"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2020\/466"},{"key":"ref47","first-page":"888","article-title":"Automatic Web testing using curiosity-driven reinforcement learning","author":"zheng","year":"2021","journal-title":"Proc 43rd Int Conf Softw Eng"},{"key":"ref42","first-page":"962","article-title":"A deep Bayesian policy reuse approach against non-stationary agents","author":"zheng","year":"2018","journal-title":"Proc 31st Annu Conf Neural Inf Process Syst"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.6047"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/88"},{"key":"ref43","first-page":"1","article-title":"Efficient policy detecting and reusing for non-stationarity in Markov games","volume":"35","author":"zheng","year":"2020","journal-title":"J Syst Softw"}],"container-title":["IEEE Transactions on Smart Grid"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5165411\/9460803\/09365691.pdf?arnumber=9365691","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,10,26]],"date-time":"2021-10-26T20:32:40Z","timestamp":1635280360000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9365691\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,7]]},"references-count":49,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/tsg.2021.3062700","relation":{},"ISSN":["1949-3053","1949-3061"],"issn-type":[{"value":"1949-3053","type":"print"},{"value":"1949-3061","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,7]]}}}