{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,8]],"date-time":"2026-04-08T17:02:26Z","timestamp":1775667746624,"version":"3.50.1"},"reference-count":42,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2021,3,1]],"date-time":"2021-03-01T00:00:00Z","timestamp":1614556800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,3,1]],"date-time":"2021-03-01T00:00:00Z","timestamp":1614556800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,3,1]],"date-time":"2021-03-01T00:00:00Z","timestamp":1614556800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["51907063"],"award-info":[{"award-number":["51907063"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["2019MS054"],"award-info":[{"award-number":["2019MS054"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Support Program for the Excellent Talents in Beijing City","award":["X19048"],"award-info":[{"award-number":["X19048"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Smart Grid"],"published-print":{"date-parts":[[2021,3]]},"DOI":"10.1109\/tsg.2020.3025082","type":"journal-article","created":{"date-parts":[[2020,9,21]],"date-time":"2020-09-21T21:23:21Z","timestamp":1600723401000},"page":"1380-1393","source":"Crossref","is-referenced-by-count":148,"title":["Mobility-Aware Charging Scheduling for Shared On-Demand Electric Vehicle Fleet Using Deep Reinforcement Learning"],"prefix":"10.1109","volume":"12","author":[{"given":"Yanchang","family":"Liang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7085-260X","authenticated-orcid":false,"given":"Zhaohao","family":"Ding","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7590-0172","authenticated-orcid":false,"given":"Tao","family":"Ding","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7774-468X","authenticated-orcid":false,"given":"Wei-Jen","family":"Lee","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","article-title":"Weighted sup-norm contractions in dynamic programming: A review and some new applications","author":"bertsekas","year":"2012"},{"key":"ref38","author":"fran\u00e7ois-lavet","year":"2015","journal-title":"How to discount deep reinforcement learning Towards new dynamic strategies"},{"key":"ref33","year":"2019","journal-title":"PJM Market Data"},{"key":"ref32","author":"lee","year":"2020","journal-title":"Deep reinforcement learning approach to MIMO precoding problem Optimality and robustness"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TPWRS.2020.2999536"},{"key":"ref30","article-title":"An analysis of temporal-difference learning with function approximationtechnical","author":"tsitsiklis","year":"1996"},{"key":"ref37","first-page":"8026","article-title":"Pytorch: An imperative style, high-performance deep learning library","author":"paszke","year":"2019","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref36","year":"2019","journal-title":"Gurobi"},{"key":"ref35","first-page":"6","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2015","journal-title":"Proc 3rd Int Conf Learn Rep (ICLR)"},{"key":"ref34","first-page":"315","article-title":"Deep sparse rectifier neural networks","author":"glorot","year":"2011","journal-title":"Proc 14th Int Conf Artif Intell Stat"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/3357384.3357978"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1994.6.6.1185"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2019.2947408"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/3292500.3330724"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1145\/3308558.3313433"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1137\/0105003"},{"key":"ref15","author":"yang","year":"2018","journal-title":"Mean field multi-agent reinforcement learning"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1145\/3219819.3219993"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2015.2393059"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TIA.2020.2990096"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2018.2879572"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1007\/BF00115009"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2018.8569459"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1016\/S0004-3702(99)00052-1"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.trc.2019.12.020"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1038\/nature16961"},{"key":"ref29","article-title":"Reinforcement learning for robots using neural networks","author":"lin","year":"1993"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/3219819.3219824"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1038\/nature24270"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2016.7487272"},{"key":"ref9","year":"2019","journal-title":"Data Source Didi Chuxing GAIA Initiative"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TCNS.2019.2923384"},{"key":"ref20","author":"turan","year":"2019","journal-title":"Dynamic pricing and management for electric autonomous mobility on demand systems using reinforcement learning"},{"key":"ref22","author":"haarnoja","year":"2018","journal-title":"Soft actor-critic Off-policy maximum entropy deep reinforcement learning with a stochastic actor"},{"key":"ref21","author":"lillicrap","year":"2015","journal-title":"Continuous control with deep reinforcement learning"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1007\/BF03026576"},{"key":"ref24","author":"sutton","year":"2018","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1162\/089976699300016070"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-335-6.50027-1"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2018.2882861"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1016\/j.ecolmodel.2007.03.041"}],"container-title":["IEEE Transactions on Smart Grid"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5165411\/9364758\/09201039.pdf?arnumber=9201039","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T14:53:13Z","timestamp":1652194393000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9201039\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,3]]},"references-count":42,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/tsg.2020.3025082","relation":{},"ISSN":["1949-3053","1949-3061"],"issn-type":[{"value":"1949-3053","type":"print"},{"value":"1949-3061","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,3]]}}}