{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,25]],"date-time":"2025-10-25T12:43:32Z","timestamp":1761396212962,"version":"3.28.0"},"reference-count":23,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,12,1]],"date-time":"2021-12-01T00:00:00Z","timestamp":1638316800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,12,1]],"date-time":"2021-12-01T00:00:00Z","timestamp":1638316800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,12,1]],"date-time":"2021-12-01T00:00:00Z","timestamp":1638316800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,12]]},"DOI":"10.1109\/globecom46510.2021.9685279","type":"proceedings-article","created":{"date-parts":[[2022,2,2]],"date-time":"2022-02-02T16:59:04Z","timestamp":1643821144000},"page":"01-07","source":"Crossref","is-referenced-by-count":13,"title":["Deep Reinforcement Learning for cell on\/off energy saving on Wireless Networks"],"prefix":"10.1109","author":[{"given":"Joan S.","family":"Pujol-Roigl","sequence":"first","affiliation":[{"name":"Samsung Electronics R&D Institute UK,Surrey,UK,TW18 4QE"}]},{"given":"Shangbin","family":"Wu","sequence":"additional","affiliation":[{"name":"Samsung Electronics R&D Institute UK,Surrey,UK,TW18 4QE"}]},{"given":"Yue","family":"Wang","sequence":"additional","affiliation":[{"name":"Samsung Electronics R&D Institute UK,Surrey,UK,TW18 4QE"}]},{"given":"Minsuk","family":"Choi","sequence":"additional","affiliation":[{"name":"Samsung Research, Seoul R&D Campus, Umyeon dong,Seoul,Republic of Korea"}]},{"given":"Intaik","family":"Park","sequence":"additional","affiliation":[{"name":"Samsung Research, Seoul R&D Campus, Umyeon dong,Seoul,Republic of Korea"}]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1155\/2014\/965495"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM.2016.7524485"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TMC.2014.2307322"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TGCN.2019.2931700"},{"journal-title":"3GPP T R 36 927 Potential solutions for energy saving for E-UTRAN","year":"2020","key":"ref14"},{"key":"ref15","first-page":"304","article-title":"Management and orchestration of virtual network functions via deep reinforcement learning","author":"roig","year":"2019","journal-title":"IEEE JSAC"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/VTCFall.2018.8690555"},{"key":"ref17","first-page":"1","article-title":"Location-aware sleep strategy for energy-delay tradeoffs in 5G with RL","author":"el-amine","year":"2019","journal-title":"IEEE PIMRC"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2020.2995057"},{"journal-title":"Addressing Function Approximation Error in Actor-Critic Methods","year":"2018","author":"fujimoto","key":"ref19"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/1851290.1851300"},{"key":"ref3","first-page":"6","article-title":"ICT energy consumption-trends and challenges","volume":"2","author":"fettweis","year":"2008","journal-title":"IEEE SWPMC"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2013.032013.120494"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/IWCMC.2013.6583618"},{"key":"ref8","first-page":"1525","article-title":"Base station operation and user association mechanisms for energy-delay tradeoffs in green cellular networks","author":"son","year":"2011","journal-title":"IEEE JSAC"},{"journal-title":"Energy consumption prediction using machine learning A review","year":"2019","author":"mosavi","key":"ref7"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/MNET.2011.5730527"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICOSEC49089.2020.9215362"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2013.130506"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.3233\/KES-2010-0206"},{"key":"ref22","volume":"135","author":"sutton","year":"1998","journal-title":"Introduction to Reinforcement Learning"},{"key":"ref21","first-page":"309","article-title":"Frequency adjusted multi-agent q-learning","volume":"1","author":"kaisers","year":"0","journal-title":"Proceedings of the 9th International Conference on Autonomous Agents and Multiagent Systems Volume 1"},{"key":"ref23","first-page":"249","article-title":"Understanding the difficulty of training deep feedforward neural networks","author":"glorot","year":"0","journal-title":"Proceedings of the Thirteenth International Conference on Artificial Intelligence and Statistics"}],"event":{"name":"GLOBECOM 2021 - 2021 IEEE Global Communications Conference","start":{"date-parts":[[2021,12,7]]},"location":"Madrid, Spain","end":{"date-parts":[[2021,12,11]]}},"container-title":["2021 IEEE Global Communications Conference (GLOBECOM)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9685019\/9685006\/09685279.pdf?arnumber=9685279","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,8]],"date-time":"2022-07-08T22:21:54Z","timestamp":1657318914000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9685279\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,12]]},"references-count":23,"URL":"https:\/\/doi.org\/10.1109\/globecom46510.2021.9685279","relation":{},"subject":[],"published":{"date-parts":[[2021,12]]}}}