{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,17]],"date-time":"2026-04-17T04:08:54Z","timestamp":1776398934158,"version":"3.51.2"},"reference-count":34,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"DOI":"10.13039\/501100013099","name":"Scientific Research Fund of Liaoning Provincial Education Department","doi-asserted-by":"publisher","award":["LQGD2019005"],"award-info":[{"award-number":["LQGD2019005"]}],"id":[{"id":"10.13039\/501100013099","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100015636","name":"Doctoral Start-up Foundation of Liaoning Province","doi-asserted-by":"publisher","award":["2020-BS-141"],"award-info":[{"award-number":["2020-BS-141"]}],"id":[{"id":"10.13039\/501100015636","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2021]]},"DOI":"10.1109\/access.2021.3114335","type":"journal-article","created":{"date-parts":[[2021,9,20]],"date-time":"2021-09-20T20:26:14Z","timestamp":1632169574000},"page":"139685-139696","source":"Crossref","is-referenced-by-count":25,"title":["Optimal Scheduling Framework of Electricity-Gas-Heat Integrated Energy System Based on Asynchronous Advantage Actor-Critic Algorithm"],
"prefix":"10.1109","volume":"9","author":[{"given":"Jian","family":"Dong","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2315-6465","authenticated-orcid":false,"given":"Haixin","family":"Wang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7890-7206","authenticated-orcid":false,"given":"Junyou","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Xinyi","family":"Lu","sequence":"additional","affiliation":[]},{"given":"Liu","family":"Gao","sequence":"additional","affiliation":[]},{"given":"Xiran","family":"Zhou","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1016\/j.scs.2020.102230"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/POWERCON.2018.8601988"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2963463"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/YAC.2019.8787657"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TCST.2018.2837097"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TSTE.2013.2274818"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.enbuild.2014.10.019"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.3390\/en9070499"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2019.03.205"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TEC.2014.2352554"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2016.01.014"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.35833\/MPCE.2019.000234"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2019.2941498"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2018.2879572"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2019.2930299"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1016\/j.comcom.2019.12.054"},
{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/j.energy.2021.121392"},{"key":"ref27","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"2016","journal-title":"Proc Int Conf Mach Learn (ICML)"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2020.3003399"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TSTE.2020.2989793"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2020.2981320"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2018.2849619"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2018.11.058"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3054532"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TPWRS.2019.2951719"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/j.enconman.2015.09.066"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2020.114879"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2020.2977374"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.17775\/CSEEJPES.2019.02890"},{"key":"ref21","first-page":"2469","article-title":"Deep reinforcement learning for sequence-to-sequence models","volume":"31","author":"keneshloo","year":"2020","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1016\/j.enbuild.2020.110225"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.3390\/en12122291"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2924577"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2020.2996274"}],"container-title":["IEEE Access"],
"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/9312710\/09541396.pdf?arnumber=9541396","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,12,17]],"date-time":"2021-12-17T19:55:41Z","timestamp":1639770941000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9541396\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021]]},"references-count":34,"URL":"https:\/\/doi.org\/10.1109\/access.2021.3114335","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021]]}}}