{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T01:17:13Z","timestamp":1740100633666,"version":"3.37.3"},"reference-count":27,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,2,13]],"date-time":"2022-02-13T00:00:00Z","timestamp":1644710400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,2,13]],"date-time":"2022-02-13T00:00:00Z","timestamp":1644710400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001321","name":"National Research Foundation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001321","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100009950","name":"Ministry of Education","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100009950","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,2,13]]},"DOI":"10.23919\/icact53585.2022.9728866","type":"proceedings-article","created":{"date-parts":[[2022,3,11]],"date-time":"2022-03-11T20:27:02Z","timestamp":1647030422000},"page":"160-166","source":"Crossref","is-referenced-by-count":0,"title":["Reinforcement Learning base DR Method for ESS SoC Optimization and Users Satisfaction"],"prefix":"10.23919","author":[{"given":"Yong Hee","family":"Park","sequence":"first","affiliation":[{"name":"Chungbuk National University,Information &#x0026; communication Engineering,Chungcheongbuk-do,Korea"}]},{"given":"Seong Gon","family":"Choi","sequence":"additional","affiliation":[{"name":"Chungbuk National University,Information &#x0026; communication Engineering,Chungcheongbuk-do,Korea"}]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/59.496144"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TII.2011.2172454"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1049\/ip-smt:19951929"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/JAS.2017.7510739"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/InfoSEEE.2014.6946259"},{"key":"ref15","first-page":"607","article-title":"A complexity analysis of cooperative mechanismsin reinforcement learning","author":"whitehead","year":"1991","journal-title":"AAAI"},{"key":"ref16","article-title":"Learning Representations in Model-Free Hierarchical Reinforcement Learning","author":"rafati","year":"2018","journal-title":"Learning Representations in Reinforcement Learning"},{"key":"ref17","article-title":"Playing Atari with Deep Reinforcement Learning","author":"mnih","year":"0","journal-title":"Neural Information Processing Systems 2013"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.23919\/ICACT48636.2020.9061279"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/NAPS50074.2021.9449714"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TPWRS.2011.2161350"},{"year":"0","key":"ref27","article-title":"marl - Multi-agent reinforcement learning framework, 
Github"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1541\/ieejjournal.129.16"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TPEL.2017.2733019"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TSTE.2018.2870561"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TII.2015.2431219"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2937623"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/GTDAsia.2019.8715993"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/MPE.2010.936353"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2019.2907650"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2015.2396993"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-27645-3_1"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/IKT51791.2020.9345625"},{"key":"ref24","article-title":"Diving deeper into Reinforcement Learning with Q-Learning","author":"simonini","year":"2018","journal-title":"Freecode"},{"key":"ref23","article-title":"Markovian sequential decision-making in non-stationary environments: application to argumentative debates","author":"maudet","year":"2015","journal-title":"HAL"},{"year":"0","key":"ref26","article-title":"CAN-Data, Adaptive Charging Network"},{"journal-title":"The UMass website","article-title":"Smart Stat Project","year":"2019","key":"ref25"}],"event":{"name":"2022 24th International Conference on Advanced Communication Technology (ICACT)","start":{"date-parts":[[2022,2,13]]},"location":"PyeongChang Kwangwoon_Do, Korea, Republic of","end":{"date-parts":[[2022,2,16]]}},"container-title":["2022 24th International Conference on Advanced Communication Technology (ICACT)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9728250\/9728768\/09728866.pdf?arnumber=9728866","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T20:47:02Z","timestamp":1658177222000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9728866\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,2,13]]},"references-count":27,"URL":"https:\/\/doi.org\/10.23919\/icact53585.2022.9728866","relation":{},"subject":[],"published":{"date-parts":[[2022,2,13]]}}}