{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,31]],"date-time":"2026-03-31T14:06:54Z","timestamp":1774966014990,"version":"3.50.1"},"reference-count":43,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"6","license":[{"start":{"date-parts":[[2021,11,1]],"date-time":"2021-11-01T00:00:00Z","timestamp":1635724800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,11,1]],"date-time":"2021-11-01T00:00:00Z","timestamp":1635724800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,11,1]],"date-time":"2021-11-01T00:00:00Z","timestamp":1635724800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["51821005"],"award-info":[{"award-number":["51821005"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["51977088"],"award-info":[{"award-number":["51977088"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Smart Grid"],"published-print":{"date-parts":[[2021,11]]},"DOI":"10.1109\/tsg.2021.3098298","type":"journal-article","created":{"date-parts":[[2021,7,26]],"date-time":"2021-07-26T21:47:41Z","timestamp":1627336061000},"page":"5124-5134","source":"Crossref","is-referenced-by-count":170,"title":["Deep Reinforcement Learning for Continuous Electric Vehicles Charging Control With Dynamic User Behaviors"],"prefix":"10.1109","volume":"12","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-3016-5415","authenticated-orcid":false,"given":"Linfang","family":"Yan","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8423-7368","authenticated-orcid":false,"given":"Xia","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Jianyu","family":"Zhou","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3351-5065","authenticated-orcid":false,"given":"Yin","family":"Chen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0288-727X","authenticated-orcid":false,"given":"Jinyu","family":"Wen","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","author":"clemen","year":"2013","journal-title":"Making Hard Decisions with DecisionTools"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2020.3028470"},{"key":"ref33","first-page":"3674","article-title":"Reinforcement learning-based plug-in electric vehicle charging with forecasted price","volume":"66","author":"li","year":"2017","journal-title":"IEEE Trans Veh Technol"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/TII.2019.2950809"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2019.2920320"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2019.2952331"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2019.2955437"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TIA.2020.2990096"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2020.3015204"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2018.2808247"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TTE.2017.2659626"},{"key":"ref40","year":"2020","journal-title":"California Independent System Operator"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2016.2541305"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2014.2385711"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2017.2789333"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TII.2018.2812755"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TII.2018.2823321"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.jpowsour.2014.01.075"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1016\/j.est.2018.08.023"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2020.2998072"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2020.3008279"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2019.2957289"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2011.2173507"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2018.2834219"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2018.2815689"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2016.2558585"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2018.2879572"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2016.2582749"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1049\/iet-gtd.2013.0624"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2015.2512501"},{"key":"ref2","year":"2020","journal-title":"Global EV Outlook 2020"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TSTE.2015.2498521"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1038\/s41560-017-0074-z"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TII.2020.3003669"},{"key":"ref22","author":"silver","year":"2017","journal-title":"Mastering chess and shogi by self-play with a general reinforcement learning algorithm"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1038\/nature16961"},{"key":"ref42","author":"lillicrap","year":"2015","journal-title":"Continuous control with deep reinforcement learning"},{"key":"ref24","author":"fujimoto","year":"2018","journal-title":"Addressing function approximation error in actor&#x2013;critic methods"},{"key":"ref41","year":"2020","journal-title":"National Household Travel Survey"},{"key":"ref23","first-page":"3215","article-title":"Rainbow: Combining improvements in deep reinforcement learning","author":"hessel","year":"2017","journal-title":"Proc 32nd AAAI Conf Artif Intell (AAAI)"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2020.2978061"},{"key":"ref43","author":"schulman","year":"2017","journal-title":"Proximal policy optimization algorithms"},{"key":"ref25","author":"haarnoja","year":"2018","journal-title":"Soft actor&#x2013;critic algorithms and applications"}],"container-title":["IEEE Transactions on Smart Grid"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5165411\/9582816\/09493711.pdf?arnumber=9493711","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T14:53:18Z","timestamp":1652194398000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9493711\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,11]]},"references-count":43,"journal-issue":{"issue":"6"},"URL":"https:\/\/doi.org\/10.1109\/tsg.2021.3098298","relation":{},"ISSN":["1949-3053","1949-3061"],"issn-type":[{"value":"1949-3053","type":"print"},{"value":"1949-3061","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,11]]}}}