{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,9]],"date-time":"2026-03-09T15:42:47Z","timestamp":1773070967401,"version":"3.50.1"},"reference-count":12,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"name":"Science Program of State Grid"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2021]]},"DOI":"10.1109\/access.2021.3089625","type":"journal-article","created":{"date-parts":[[2021,6,15]],"date-time":"2021-06-15T19:52:35Z","timestamp":1623786755000},"page":"90358-90365","source":"Crossref","is-referenced-by-count":71,"title":["Distribution Network Reconfiguration Based on NoisyNet Deep Q-Learning Network"],"prefix":"10.1109","volume":"9","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-1030-3756","authenticated-orcid":false,"given":"Beibei","family":"Wang","sequence":"first","affiliation":[]},{"given":"Hong","family":"Zhu","sequence":"additional","affiliation":[]},{"given":"Honghua","family":"Xu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6076-4892","authenticated-orcid":false,"given":"Yuqing","family":"Bao","sequence":"additional","affiliation":[]},{"given":"Huifang","family":"Di","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1049\/iet-gtd.2019.0507"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TSTE.2020.2980890"},{"key":"ref10","article-title":"Playing atari with deep reinforcement learning","author":"mnih","year":"2013","journal-title":"arXiv 1312 5602"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2020.3009113"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1049\/iet-gtd.2016.0890"},{"key":"ref12","first-page":"2094","article-title":"Deep reinforcement learning with double Q-learning","author":"hasselt","year":"2016","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1049\/joe.2018.9356"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2918480"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2017.2732353"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2020.3005270"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1049\/iet-gtd.2018.6583"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/9312710\/09455432.pdf?arnumber=9455432","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,12,17]],"date-time":"2021-12-17T19:56:36Z","timestamp":1639770996000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9455432\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021]]},"references-count":12,"URL":"https:\/\/doi.org\/10.1109\/access.2021.3089625","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021]]}}}