{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,22]],"date-time":"2025-10-22T17:58:17Z","timestamp":1761155897099,"version":"3.43.0"},"reference-count":30,"publisher":"IEEE","license":[{"start":{"date-parts":[[2014,12,1]],"date-time":"2014-12-01T00:00:00Z","timestamp":1417392000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2014,12,1]],"date-time":"2014-12-01T00:00:00Z","timestamp":1417392000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2014,12]]},"DOI":"10.1109\/adprl.2014.7010630","type":"proceedings-article","created":{"date-parts":[[2015,1,19]],"date-time":"2015-01-19T21:48:03Z","timestamp":1421704083000},"page":"1-7","source":"Crossref","is-referenced-by-count":2,"title":["Optimal self-learning battery control in smart residential grids by iterative Q-learning algorithm"],"prefix":"10.1109","author":[{"given":"Qinglai","family":"Wei","sequence":"first","affiliation":[{"name":"The State Key Laboratory of Management and Control for Complex Systems, Institute of Automation, Chinese Academy of Sciences"}]},{"given":"Derong","family":"Liu","sequence":"additional","affiliation":[{"name":"The State Key Laboratory of Management and Control for Complex Systems, Institute of Automation, Chinese Academy of Sciences"}]},{"given":"Guang","family":"Shi","sequence":"additional","affiliation":[{"name":"The State Key Laboratory of Management and Control for Complex Systems, Institute of Automation, Chinese Academy of Sciences"}]},{"given":"Yu","family":"Liu","sequence":"additional","affiliation":[{"name":"Institute of Automation, Chinese Academy of Sciences"}]},{"given":"Qiang","family":"Guan","sequence":"additional","affiliation":[{"name":"Institute of Automation, Chinese Academy of Sciences"}]}],"member":"263","reference":[{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2012.2196708"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2012.02.027"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/s00521-013-1361-7"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1007\/s00521-012-1188-7"},{"journal-title":"Learning from delayed rewards","year":"1989","author":"watkins","key":"ref13"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992698"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TFUZZ.2008.925910"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2013.2263201"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/72.623201"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/MCS.2012.2214134"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/s00521-011-0711-6"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TSMCB.2008.926614"},{"key":"ref4","doi-asserted-by":"crossref","first-page":"779","DOI":"10.1109\/TSMCB.2012.2216523","article-title":"Finite-approximation-error-based optimal control approach for discrete-time nonlinear systems","volume":"43","author":"liu","year":"2013","journal-title":"IEEE Transactions on Cybernetics"},{"journal-title":"Dynamic Programming","year":"1957","author":"bellman","key":"ref27"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2012.2227339"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TASE.2013.2280974"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2006.878720"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2013.2281663"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TASE.2013.2284545"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1049\/iet-cta.2012.0486"},{"key":"ref2","doi-asserted-by":"crossref","first-page":"67","DOI":"10.7551\/mitpress\/4939.003.0007","article-title":"A menu of designs for reinforcement learning over time","author":"werbos","year":"1991","journal-title":"Neural Networks for Control"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TIE.2014.2301770"},{"key":"ref1","first-page":"25","article-title":"Advanced forecasting methods for global crisis warning and models of intelligence","volume":"22","author":"werbos","year":"1977","journal-title":"General Systems Yearbook"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/72.914523"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1016\/j.ijepes.2012.11.023"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/s12559-012-9191-y"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TPAS.1981.316866"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TEC.2006.878239"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2012.2196708"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2006.878720"}],"event":{"name":"2014 IEEE Symposium on Adaptive Dynamic Programming and Reinforcement Learning (ADPRL)","start":{"date-parts":[[2014,12,9]]},"location":"Orlando, FL, USA","end":{"date-parts":[[2014,12,12]]}},"container-title":["2014 IEEE Symposium on Adaptive Dynamic Programming and Reinforcement Learning (ADPRL)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7000183\/7010603\/07010630.pdf?arnumber=7010630","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,8]],"date-time":"2025-08-08T18:36:21Z","timestamp":1754678181000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/7010630\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2014,12]]},"references-count":30,"URL":"https:\/\/doi.org\/10.1109\/adprl.2014.7010630","relation":{},"subject":[],"published":{"date-parts":[[2014,12]]}}}