{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,18]],"date-time":"2026-03-18T20:39:38Z","timestamp":1773866378106,"version":"3.50.1"},"reference-count":87,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"15","license":[{"start":{"date-parts":[[2023,8,1]],"date-time":"2023-08-01T00:00:00Z","timestamp":1690848000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2023,8,1]],"date-time":"2023-08-01T00:00:00Z","timestamp":1690848000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,8,1]],"date-time":"2023-08-01T00:00:00Z","timestamp":1690848000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Nordic5Tech Ph.D. Fellowship"},{"name":"Reinforcing the Health Data Infrastructure in Mobility and Assurance through Data Democratization Project funded by the Norwegian Research Council","award":["288856"],"award-info":[{"award-number":["288856"]}]},{"DOI":"10.13039\/100012774","name":"Flexible Energy Denmark Project funded by Innovation Fund Denmark","doi-asserted-by":"publisher","award":["8090-00069B"],"award-info":[{"award-number":["8090-00069B"]}],"id":[{"id":"10.13039\/100012774","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Internet Things J."],"published-print":{"date-parts":[[2023,8,1]]},"DOI":"10.1109\/jiot.2023.3263261","type":"journal-article","created":{"date-parts":[[2023,3,30]],"date-time":"2023-03-30T17:37:32Z","timestamp":1680197852000},"page":"13876-13894","source":"Crossref","is-referenced-by-count":21,"title":["Data Center HVAC Control Harnessing Flexibility Potential via Real-Time Pricing Cost Optimization Using Reinforcement Learning"],"prefix":"10.1109","volume":"10","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-3221-8090","authenticated-orcid":false,"given":"Marco","family":"Biemann","sequence":"first","affiliation":[{"name":"Department of Technology, Management and Economics, Section of Energy Economics and Modelling, Technical University of Denmark, Lyngby, Denmark"}]},{"given":"Philipp Andreas","family":"Gunkel","sequence":"additional","affiliation":[{"name":"Department of Technology, Management and Economics, Section of Energy Economics and Modelling, Technical University of Denmark, Lyngby, Denmark"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7676-1143","authenticated-orcid":false,"given":"Fabian","family":"Scheller","sequence":"additional","affiliation":[{"name":"Faculty of Business and Engineering, Technical University of Applied Sciences W&#x00FC;rzburg-Schweinfurt, W&#x00FC;rzburg, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9387-7650","authenticated-orcid":false,"given":"Lizhen","family":"Huang","sequence":"additional","affiliation":[{"name":"Department of Manufacturing and Civil Engineering, Norwegian University of Science and Technology, Trondheim, Norway"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5133-6688","authenticated-orcid":false,"given":"Xiufeng","family":"Liu","sequence":"additional","affiliation":[{"name":"Department of Technology, Management and Economics, Section of Energy Economics and Modelling, Technical University of Denmark, Lyngby, 
Denmark"}]}],"member":"263","reference":[{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/978-981-13-2853-4_4"},{"key":"ref57","first-page":"1587","article-title":"Addressing function approximation error in actor-critic methods","volume":"80","author":"fujimoto","year":"2018","journal-title":"Proc Int Conf Mach Learn Res"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2021.118346"},{"key":"ref56","first-page":"2775","article-title":"Bridging the gap between value and policy based reinforcement learning","author":"nachum","year":"2017","journal-title":"Proc Int Conf Adv Neural Inf Process Syst"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2021.117164"},{"key":"ref59","first-page":"3803","article-title":"Sim-toreal transfer of robotic control with dynamics randomization","author":"peng","year":"2018","journal-title":"Proc IEEE Int Conf Robot Autom (ICRA)"},{"key":"ref14","first-page":"1861","article-title":"Soft actor-critic: Offpolicy maximum entropy deep reinforcement learning with a stochastic actor","author":"haarnoja","year":"2018","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref58","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1016\/j.egyai.2021.100101"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2022.3152586"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2021.117733"},{"key":"ref55","first-page":"1","article-title":"Maximum entropy RL (provably) solves some robust RL problems","author":"eysenbach","year":"2022","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2020.115036"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1016\/j.energy.2021.120725"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1002\/9781119815068"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.3390\/en8088300"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2021.3088290"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2019.2957289"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2020.3042498"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1016\/j.enbuild.2022.111903"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2020.116117"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2016.2517211"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3126365"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2022.3175728"},{"key":"ref86","first-page":"1","article-title":"Stable-Baselines3: Reliable reinforcement learning implementations","volume":"22","author":"raffin","year":"2021","journal-title":"J Mach Learn Res"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1016\/j.energy.2022.125187"},{"key":"ref85","first-page":"1","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2015","journal-title":"Proc ICLR"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2018.11.002"},{"key":"ref43","first-page":"6000","article-title":"Attention is all you 
need","volume":"30","author":"vaswani","year":"2017","journal-title":"Proc Int Conf Adv Neural Inf Process Syst"},{"key":"ref87","author":"\u00e5str\u00f6m","year":"2006","journal-title":"Advanced PID Control"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.3389\/fenrg.2020.610518"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/j.ijforecast.2014.08.008"},{"key":"ref7","first-page":"3818","article-title":"Data center cooling using model-predictive control","volume":"31","author":"lazic","year":"2018","journal-title":"Proc Int Conf Adv Neural Inf Process Syst"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2021.116983"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2018.2802704"},{"key":"ref3","year":"2022","journal-title":"Danskerne bruger str&#x00F8;mmen n&#x00E5;r den er billig Fire ud af 10 har &#x00E6;ndret deres energiforbrug"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/j.arcontrol.2020.09.001"},{"key":"ref5","year":"2021","journal-title":"Dynamic retail energy prices"},{"key":"ref82","first-page":"972","article-title":"Selfnormalizing neural networks","volume":"30","author":"klambauer","year":"2017","journal-title":"Proc Int Conf Adv Neural Inf Process Syst"},{"key":"ref81","first-page":"1","article-title":"Maximum a posteriori policy optimisation","author":"abdolmaleki","year":"2018","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/TIE.2021.3104596"},{"key":"ref84","article-title":"Layer normalization","author":"ba","year":"2016","journal-title":"arXiv 1607 06450"},{"key":"ref83","first-page":"1","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2016","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref80","first-page":"1","article-title":"Prioritized experience replay","author":"schaul","year":"2016","journal-title":"Proc ICLR"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1145\/3360322.3360861"},{"key":"ref79","first-page":"13544","article-title":"RUDDER: Return decomposition for delayed rewards","volume":"32","author":"arjona-medina","year":"2019","journal-title":"Proc Int Conf Adv Neural Inf Process Syst"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1016\/j.enbuild.2022.112584"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1016\/j.buildenv.2021.108680"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2940005"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.3390\/pr5030046"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2021.117642"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1038\/s41467-019-13073-w"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1016\/j.buildenv.2019.106535"},{"key":"ref74","first-page":"316","article-title":"An intelligent battery controller using biascorrected Q-learning","author":"lee","year":"2012","journal-title":"Proc 26th AAAI Conf Artif Intell"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2022.3164023"},{"key":"ref77","first-page":"1","article-title":"Highdimensional continuous control using generalized advantage estimation","author":"schulman","year":"2016","journal-title":"Proc Int Conf Learn Represent (ICLR)"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1016\/j.energy.2022.125290"},{"key":"ref76","first-page":"7654","article-title":"Counterfactual credit assignment in model-free reinforcement 
learning","author":"mesnard","year":"2021","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref2","year":"2018","journal-title":"The Future of Cooling Opportunities for Energy-Efficient Air Conditioning"},{"key":"ref1","year":"2021","journal-title":"2021 Global Status Report for Buildings and Construction Towards a Zero-Emission Efficient and Resilient Buildings and Construction Sector"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1145\/3486611.3488730"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2021.118403"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1145\/2254756.2254778"},{"key":"ref70","author":"radovanovic","year":"2020","journal-title":"Our data centers now work harder when the sun shines and wind blows"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2897898"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1016\/j.enbuild.2020.110345"},{"key":"ref24","doi-asserted-by":"crossref","first-page":"eabk2822","DOI":"10.1126\/scirobotics.abk2822","article-title":"Learning robust perceptive locomotion for quadrupedal robots in the wild","volume":"7","author":"miki","year":"2022","journal-title":"Sci Robot"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1007\/s12053-018-9753-2"},{"key":"ref23","first-page":"1","article-title":"Recurrent experience replay in distributed reinforcement learning","author":"kapturowski","year":"2018","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/IGCC.2014.7039172"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2020.114945"},{"key":"ref25","doi-asserted-by":"crossref","first-page":"174","DOI":"10.1016\/0022-247X(65)90154-X","article-title":"Optimal control of Markov processes with incomplete state information","volume":"10","author":"\u00e5str\u00f6m","year":"1965","journal-title":"J Math Anal Appl"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1145\/2637364.2592004"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1179"},{"key":"ref64","year":"2021","journal-title":"Thermal Guidelines for Data Processing Environments"},{"key":"ref63","year":"2021","journal-title":"High energy prices"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-019-1724-z"},{"key":"ref66","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017","journal-title":"arXiv 1707 06347"},{"key":"ref21","article-title":"Dota 2 with large scale deep reinforcement learning","author":"berner","year":"2019","journal-title":"arXiv 1912 06680"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11694"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1016\/j.egyai.2022.100139"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2020.2986333"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1016\/j.autcon.2022.104128"},{"key":"ref60","first-page":"16691","article-title":"Recurrent model-free RL can be a strong baseline for many POMDPs","author":"ni","year":"2022","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1016\/j.enbuild.2020.110603"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2019.2927410"}],"container-title":["IEEE Internet of Things 
Journal"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6488907\/10194321\/10089168.pdf?arnumber=10089168","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,8,14]],"date-time":"2023-08-14T18:00:11Z","timestamp":1692036011000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10089168\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,8,1]]},"references-count":87,"journal-issue":{"issue":"15"},"URL":"https:\/\/doi.org\/10.1109\/jiot.2023.3263261","relation":{},"ISSN":["2327-4662","2372-2541"],"issn-type":[{"value":"2327-4662","type":"electronic"},{"value":"2372-2541","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,8,1]]}}}