{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,23]],"date-time":"2025-09-23T14:09:29Z","timestamp":1758636569713,"version":"3.28.0"},"reference-count":37,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2018,6]]},"DOI":"10.23919\/acc.2018.8431014","type":"proceedings-article","created":{"date-parts":[[2018,8,17]],"date-time":"2018-08-17T16:16:10Z","timestamp":1534522570000},"page":"3945-3950","source":"Crossref","is-referenced-by-count":1,"title":["Approximate Dynamic Programming for Building Control Problems with Occupant Interactions"],"prefix":"10.23919","author":[{"given":"Donghwan","family":"Lee","sequence":"first","affiliation":[]},{"given":"Seungjae","family":"Lee","sequence":"additional","affiliation":[]},{"given":"Panagiota","family":"Karava","sequence":"additional","affiliation":[]},{"given":"Jianghai","family":"Hu","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1561\/2200000042"},{"key":"ref32","first-page":"317","article-title":"Neural fitted Q iteration–first experiences with a data efficient neural reinforcement learning method","author":"riedmiller","year":"2005","journal-title":"European Conference on Machine Learning"},{"journal-title":"Dueling network architectures for deep reinforcement learning","year":"2015","author":"wang","key":"ref31"},{"key":"ref30","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"journal-title":"Approximate dynamic programming for building control problems with occupant interactions","year":"2018","author":"lee","key":"ref37"},{"journal-title":"EnergyPlus 8.7.0 documentation","year":"0","key":"ref36"},{"key":"ref35","volume":"1","author":"bertsekas","year":"1995","journal-title":"Dynamic Programming and Optimal Control"},{"key":"ref34","first-page":"154","article-title":"Bayes meets Bellman: The Gaussian process approach to temporal difference learning","volume":"20","author":"engel","year":"0","journal-title":"ICML"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TCST.2011.2124461"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2012.6426251"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ACC.2012.6315347"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ACC.2010.5530680"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/j.enbuild.2011.09.022"},{"key":"ref15","doi-asserted-by":"crossref","first-page":"502","DOI":"10.1109\/TRO.2010.2044948","article-title":"A probabilistic particle-control approximation of chance-constrained stochastic predictive control","volume":"26","author":"blackmore","year":"2010","journal-title":"IEEE Transactions on Robotics"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2013.02.060"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/6979.898224"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CoASE.2013.6654024"},{"journal-title":"Neuro-Dynamic Programming","year":"1996","author":"bertsekas","key":"ref19"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1007\/s10994-011-5235-x"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2012.06.014"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1080\/10789669.2003.10391069"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.enbuild.2007.01.018"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/j.enbuild.2006.12.005"},{"journal-title":"Deep recurrent Q-learning for partially observable MDPs","year":"2015","author":"hausknecht","key":"ref29"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1016\/j.enbuild.2014.07.051"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/j.buildenv.2017.03.009"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1016\/j.buildenv.2010.06.011"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2011.2161242"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/MCS.2011.2172532"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/j.rser.2008.09.015"},{"journal-title":"Reinforcement Learning An Introduction","year":"1998","author":"sutton","key":"ref20"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1016\/j.enbuild.2005.06.001"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2015.07.050"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1016\/j.enbuild.2016.05.067"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1016\/j.conengprac.2010.01.018"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2006.884887"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2007.06.002"}],"event":{"name":"2018 Annual American Control Conference (ACC)","start":{"date-parts":[[2018,6,27]]},"location":"Milwaukee, WI","end":{"date-parts":[[2018,6,29]]}},"container-title":["2018 Annual American Control Conference (ACC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8410068\/8430677\/08431014.pdf?arnumber=8431014","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2020,8,23]],"date-time":"2020-08-23T22:27:28Z","timestamp":1598221648000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8431014\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2018,6]]},"references-count":37,"URL":"https:\/\/doi.org\/10.23919\/acc.2018.8431014","relation":{},"subject":[],"published":{"date-parts":[[2018,6]]}}}