{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,23]],"date-time":"2026-01-23T15:31:12Z","timestamp":1769182272978,"version":"3.49.0"},"reference-count":13,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,9,6]],"date-time":"2022-09-06T00:00:00Z","timestamp":1662422400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,9,6]],"date-time":"2022-09-06T00:00:00Z","timestamp":1662422400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,9,6]]},"DOI":"10.23919\/sice56594.2022.9905836","type":"proceedings-article","created":{"date-parts":[[2022,10,6]],"date-time":"2022-10-06T15:43:18Z","timestamp":1665070998000},"page":"488-493","source":"Crossref","is-referenced-by-count":2,"title":["Switching Policies based on Multi-Objective Reinforcement Learning for Adaptive Traffic Signal Control"],"prefix":"10.23919","author":[{"given":"Takumi","family":"Saiki","sequence":"first","affiliation":[{"name":"Chiba University,Graduate School of Science and Engineering,Chiba,Japan"}]},{"given":"Sachiyo","family":"Arai","sequence":"additional","affiliation":[{"name":"Chiba University,Graduate School of Science and Engineering,Chiba,Japan"}]}],"member":"263","reference":[{"key":"ref10","first-page":"14636","article-title":"A generalized algorithm for multi-objective reinforcement learning and policy adaptation","author":"yang","year":"2019","journal-title":"Advances in neural information processing systems"},{"key":"ref11","article-title":"Playing atari with deep reinforcement learning","author":"mnih","year":"2013"},{"key":"ref12","article-title":"Distributed prioritized experience replay","author":"horgan","year":"2018"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2018.8569938"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5744"},{"key":"ref3","article-title":"Using a deep reinforcement learning agent for traffic signal control","author":"genders","year":"2016"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5744"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/3292500.3330949"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/j.aap.2020.105655"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2017.8317730"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.trc.2013.08.014"},{"key":"ref1","article-title":"Self-organizing traffic lights","author":"gershenson","year":"2004"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2019.2906260"}],"event":{"name":"2022 61st Annual Conference of the Society of Instrument and Control Engineers (SICE)","location":"Kumamoto, Japan","start":{"date-parts":[[2022,9,6]]},"end":{"date-parts":[[2022,9,9]]}},"container-title":["2022 61st Annual Conference of the Society of Instrument and Control Engineers (SICE)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9905734\/9905735\/09905836.pdf?arnumber=9905836","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,11,3]],"date-time":"2022-11-03T21:31:03Z","timestamp":1667511063000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9905836\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,9,6]]},"references-count":13,"URL":"https:\/\/doi.org\/10.23919\/sice56594.2022.9905836","relation":{},"subject":[],"published":{"date-parts":[[2022,9,6]]}}}