{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,2]],"date-time":"2025-11-02T01:44:44Z","timestamp":1762047884800,"version":"build-2065373602"},"reference-count":25,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,12,14]],"date-time":"2021-12-14T00:00:00Z","timestamp":1639440000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,12,14]],"date-time":"2021-12-14T00:00:00Z","timestamp":1639440000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,12,14]]},"DOI":"10.1109\/cdc45484.2021.9683333","type":"proceedings-article","created":{"date-parts":[[2022,2,1]],"date-time":"2022-02-01T20:50:18Z","timestamp":1643748618000},"page":"6365-6370","source":"Crossref","is-referenced-by-count":9,"title":["Optimal Management of the Peak Power Penalty for Smart Grids Using MPC-based Reinforcement Learning"],"prefix":"10.1109","author":[{"given":"Wenqi","family":"Cai","sequence":"first","affiliation":[{"name":"Norwegian University of Science and Technology (NTNU),Department of Engineering Cybernetics,Trondheim,Norway"}]},{"given":"Hossein N.","family":"Esfahani","sequence":"additional","affiliation":[{"name":"Norwegian University of Science and Technology (NTNU),Department of Engineering Cybernetics,Trondheim,Norway"}]},{"given":"Arash B.","family":"Kordabad","sequence":"additional","affiliation":[{"name":"Norwegian University of Science and Technology (NTNU),Department of Engineering Cybernetics,Trondheim,Norway"}]},{"given":"Sebastien","family":"Gros","sequence":"additional","affiliation":[{"name":"Norwegian University of Science and Technology (NTNU),Department of Engineering Cybernetics,Trondheim,Norway"}]}],"member":"263","reference":[{"doi-asserted-by":"publisher","key":"ref10","DOI":"10.1109\/SmartGridComm.2011.6102369"},{"year":"2020","journal-title":"Day-ahead power prices of Trondheim Norway during November 2020","key":"ref11"},{"doi-asserted-by":"publisher","key":"ref12","DOI":"10.1109\/JSYST.2018.2855689"},{"key":"ref13","doi-asserted-by":"crossref","first-page":"8","DOI":"10.1016\/j.arcontrol.2018.09.005","article-title":"Reinforcement learning for control: Performance, stability, and deep approximators","volume":"46","author":"bu\u015foniu","year":"2018","journal-title":"Annual Reviews in Control"},{"key":"ref14","article-title":"Safe reinforcement learning using robust MPC","author":"zanon","year":"2020","journal-title":"IEEE Transactions on Automatic Control"},{"doi-asserted-by":"publisher","key":"ref15","DOI":"10.1109\/TAC.2019.2913768"},{"doi-asserted-by":"publisher","key":"ref16","DOI":"10.1109\/CCTA48906.2021.9659202"},{"doi-asserted-by":"publisher","key":"ref17","DOI":"10.1016\/j.ifacol.2020.12.1196"},{"doi-asserted-by":"publisher","key":"ref18","DOI":"10.23919\/ECC.2019.8795816"},{"doi-asserted-by":"publisher","key":"ref19","DOI":"10.1109\/CDC45484.2021.9683750"},{"doi-asserted-by":"publisher","key":"ref4","DOI":"10.1109\/SURV.2011.101911.00087"},{"doi-asserted-by":"publisher","key":"ref3","DOI":"10.1016\/j.epsr.2008.04.002"},{"doi-asserted-by":"publisher","key":"ref6","DOI":"10.1016\/j.jprocont.2014.04.014"},{"key":"ref5","article-title":"Power exchange spot market trading in europe: theoretical considerations and empirical evidence","volume":"5","author":"madlener","year":"2002","journal-title":"OSCO-GEN (Optimisation of Cogeneration Systems in a Competitive Market Environment)-Project Deliverable"},{"doi-asserted-by":"publisher","key":"ref8","DOI":"10.3390\/app8030408"},{"doi-asserted-by":"publisher","key":"ref7","DOI":"10.1016\/j.enbuild.2017.04.023"},{"key":"ref2","article-title":"Optimal control of residential energy storage under price fluctuations","author":"hegde","year":"2011","journal-title":"Energy"},{"doi-asserted-by":"publisher","key":"ref9","DOI":"10.1016\/j.epsr.2020.106634"},{"doi-asserted-by":"publisher","key":"ref1","DOI":"10.1145\/2487166.2487168"},{"doi-asserted-by":"publisher","key":"ref20","DOI":"10.1214\/aop\/1176990446"},{"key":"ref22","first-page":"1057","article-title":"Policy gradient methods for reinforcement learning with function approximation","author":"sutton","year":"2000","journal-title":"Advances in neural information processing systems"},{"doi-asserted-by":"publisher","key":"ref21","DOI":"10.1109\/TPWRS.2014.2344859"},{"key":"ref24","first-page":"1107","article-title":"Least-squares policy iteration","volume":"4","author":"lagoudakis","year":"2003","journal-title":"Journal of Machine Learning Research"},{"key":"ref23","first-page":"I-387","article-title":"Deterministic policy gradient algorithms","author":"silver","year":"0"},{"doi-asserted-by":"publisher","key":"ref25","DOI":"10.23919\/ECC54610.2021.9654852"}],"event":{"name":"2021 60th IEEE Conference on Decision and Control (CDC)","start":{"date-parts":[[2021,12,14]]},"location":"Austin, TX, USA","end":{"date-parts":[[2021,12,17]]}},"container-title":["2021 60th IEEE Conference on Decision and Control (CDC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9682670\/9682776\/09683333.pdf?arnumber=9683333","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,13]],"date-time":"2022-06-13T21:08:14Z","timestamp":1655154494000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9683333\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,12,14]]},"references-count":25,"URL":"https:\/\/doi.org\/10.1109\/cdc45484.2021.9683333","relation":{},"subject":[],"published":{"date-parts":[[2021,12,14]]}}}