{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,26]],"date-time":"2026-03-26T08:09:20Z","timestamp":1774512560949,"version":"3.50.1"},"reference-count":37,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"5","license":[{"start":{"date-parts":[[2021,9,1]],"date-time":"2021-09-01T00:00:00Z","timestamp":1630454400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,9,1]],"date-time":"2021-09-01T00:00:00Z","timestamp":1630454400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,9,1]],"date-time":"2021-09-01T00:00:00Z","timestamp":1630454400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Tsinghua-Toyota Joint Research Institute Cross-discipline Program"},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities of China","doi-asserted-by":"publisher","award":["B200201071"],"award-info":[{"award-number":["B200201071"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Smart Grid"],"published-print":{"date-parts":[[2021,9]]},"DOI":"10.1109\/tsg.2021.3088290","type":"journal-article","created":{"date-parts":[[2021,6,10]],"date-time":"2021-06-10T19:56:16Z","timestamp":1623354976000},"page":"4079-4089","source":"Crossref","is-referenced-by-count":93,"title":["Privacy Preserving Load Control of Residential Microgrid via Deep Reinforcement Learning"],"prefix":"10.1109","volume":"12","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1399-6292","authenticated-orcid":false,"given":"Zhaoming","family":"Qin","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5052-5663","authenticated-orcid":false,"given":"Di","family":"Liu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3341-2947","authenticated-orcid":false,"given":"Haochen","family":"Hua","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5219-9766","authenticated-orcid":false,"given":"Junwei","family":"Cao","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref33","first-page":"165","article-title":"Potential-based difference rewards for multiagent reinforcement learning","author":"devlin","year":"2014","journal-title":"Proc Int Conf Auton Agents Multi-Agent Syst"},{"key":"ref32","first-page":"2974","article-title":"Counterfactual multi-agent policy gradients","author":"foerster","year":"2018","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"ref31","first-page":"387","article-title":"Deterministic policy gradient algorithms","author":"silver","year":"2014","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8463162"},{"key":"ref37","author":"andrychowicz","year":"2020","journal-title":"What matters in on-policy reinforcement learning? a large-scale empirical study"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1049\/iet-gtd.2016.0772"},{"key":"ref35","author":"chung","year":"2014","journal-title":"Empirical evaluation of gated recurrent neural networks on sequence modeling"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1179"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2017.2682340"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2020.114489"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2016.2633416"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2017.2707103"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2014.2387202"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2017.2661991"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2018.2887232"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TPWRS.2016.2628055"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2015.2412091"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2019.2933502"},{"key":"ref28","year":"2020","journal-title":"Pecan street database"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1038\/s41560-017-0075-y"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2016.2582701"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TASE.2015.2497300"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TPWRS.2015.2391774"},{"key":"ref29","year":"2020","journal-title":"NOAA Data"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2014.2357211"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2019.2957289"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2019.2915679"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TII.2018.2867373"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2020.116117"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TIE.2018.2811403"},{"key":"ref20","first-page":"29","article-title":"Deep recurrent Q-learning for partially observable MDPs","author":"matthew","year":"2015","journal-title":"Proc AAAI Fall Symp Sequential Decis Making Intell Agents"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2020.2967430"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2018.2879572"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2020.2966232"},{"key":"ref23","first-page":"10","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2016","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2018.2834219"},{"key":"ref25","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"volodymyr","year":"2016","journal-title":"Proc Int Conf Mach Learn"}],"container-title":["IEEE Transactions on Smart Grid"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5165411\/9519817\/09451164.pdf?arnumber=9451164","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T14:53:15Z","timestamp":1652194395000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9451164\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,9]]},"references-count":37,"journal-issue":{"issue":"5"},"URL":"https:\/\/doi.org\/10.1109\/tsg.2021.3088290","relation":{},"ISSN":["1949-3053","1949-3061"],"issn-type":[{"value":"1949-3053","type":"print"},{"value":"1949-3061","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,9]]}}}