{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,9]],"date-time":"2026-03-09T23:09:56Z","timestamp":1773097796811,"version":"3.50.1"},"reference-count":58,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"9","license":[{"start":{"date-parts":[[2025,9,1]],"date-time":"2025-09-01T00:00:00Z","timestamp":1756684800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,9,1]],"date-time":"2025-09-01T00:00:00Z","timestamp":1756684800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,9,1]],"date-time":"2025-09-01T00:00:00Z","timestamp":1756684800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100012166","name":"\u201cPioneer\u201d and \u201cLeading Goose\u201d Research and Development Program of Zhejiang","doi-asserted-by":"publisher","award":["2023C01240"],"award-info":[{"award-number":["2023C01240"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["52172334"],"award-info":[{"award-number":["52172334"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["52131202"],"award-info":[{"award-number":["52131202"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Intell. Transport. Syst."],"published-print":{"date-parts":[[2025,9]]},"DOI":"10.1109\/tits.2025.3586939","type":"journal-article","created":{"date-parts":[[2025,7,31]],"date-time":"2025-07-31T18:32:45Z","timestamp":1753986765000},"page":"13201-13216","source":"Crossref","is-referenced-by-count":1,"title":["A Centralized Reinforcement Learning-Based Method for Traffic Signal Optimization Using an Adaptive Sequential Decision"],"prefix":"10.1109","volume":"26","author":[{"ORCID":"https:\/\/orcid.org\/0009-0004-6139-5402","authenticated-orcid":false,"given":"Chengrui","family":"Fan","sequence":"first","affiliation":[{"name":"Institute of Intelligent Transportation Systems, Zhejiang University, Hangzhou, China"}]},{"given":"Fujian","family":"Wang","sequence":"additional","affiliation":[{"name":"Institute of Intelligent Transportation Systems, Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-0264-7526","authenticated-orcid":false,"given":"Bin","family":"Zhou","sequence":"additional","affiliation":[{"name":"Institute of Intelligent Transportation Systems, Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9334-1570","authenticated-orcid":false,"given":"Dongfang","family":"Ma","sequence":"additional","affiliation":[{"name":"State Key Laboratory of Ocean Sensing and Ocean College, Zhejiang University, Zhoushan, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/j.trc.2008.10.002"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2015.2399303"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2018.2849029"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2018.2883572"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2018.2815182"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2019.2909390"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1016\/j.trb.2023.102787"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/j.ejor.2023.04.012"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1080\/15472450.2019.1643723"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/j.physa.2022.127708"},{"key":"ref11","first-page":"1151","article-title":"Multi-agent reinforcement learning for traffic light control","volume-title":"Proc. 17th Int. Conf. Mach. Learn.","author":"Wiering"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1139\/l03-014"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2010.5625066"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/j.trc.2022.103670"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2023.3242678"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1061\/(asce)0733-947x(2003)129:3(278)"},{"key":"ref17","article-title":"Playing Atari with deep reinforcement learning","author":"Mnih","year":"2013","journal-title":"arXiv:1312.5602"},{"key":"ref18","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv:1707.06347"},{"key":"ref19","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Mnih"},{"key":"ref20","article-title":"Continuous control with deep reinforcement learning","author":"Lillicrap","year":"2015","journal-title":"arXiv:1509.02971"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-56991-8_32"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/JAS.2016.7508798"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2022.3141730"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1049\/iet-its.2017.0153"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2021.3107258"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2023.3305548"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/SOLI.2012.6273526"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2013.2255286"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1016\/j.trc.2017.09.020"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1145\/3292500.3330949"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1049\/itr2.12354"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2023.07.027"},{"key":"ref33","first-page":"1046","article-title":"Trust region policy optimisation in multi-agent reinforcement learning","volume-title":"Proc. 10th Int. Conf. Learn. Represent.","author":"Kuba"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1080\/15472450.2017.1387546"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1080\/15472450.2018.1527694"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1016\/j.trc.2022.103955"},{"key":"ref37","article-title":"Semi-supervised classification with graph convolutional networks","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Kipf"},{"key":"ref38","article-title":"Graph attention networks","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Veli\u010dkovi\u0107"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2018.8569301"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1145\/3357384.3357902"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2021.3070835"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1016\/j.trc.2023.104281"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-27645-3_14"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-71682-4_5"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2022.3216203"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5744"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-56994-9_44"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2020.3035841"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN48605.2020.9206820"},{"key":"ref50","first-page":"2085","article-title":"Value-decomposition networks for cooperative multi-agent learning based on team reward","volume-title":"Proc. 17th Int. Conf. Auto. Agents MultiAgent Syst.","author":"Sunehag"},{"key":"ref51","first-page":"4295","article-title":"QMix: Monotonic value function factorisation for deep multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Rashid"},{"key":"ref52","first-page":"5887","article-title":"QTRAN: Learning to factorize with transformation for cooperative multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Son"},{"key":"ref53","article-title":"Heterogeneous-agent reinforcement learning","author":"Zhong","year":"2023","journal-title":"arXiv:2304.09870"},{"key":"ref54","first-page":"16509","article-title":"Multi-agent reinforcement learning is a sequence modeling problem","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Wen"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2021.3067057"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2019.2901791"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref58","first-page":"24611","article-title":"The surprising effectiveness of PPO in cooperative, multi-agent games","volume-title":"Proc. 36th Int. Conf. Neural Inf. Process. Syst.","author":"Yu"}],"container-title":["IEEE Transactions on Intelligent Transportation Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6979\/11178161\/11106239.pdf?arnumber=11106239","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T05:49:00Z","timestamp":1759297740000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11106239\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9]]},"references-count":58,"journal-issue":{"issue":"9"},"URL":"https:\/\/doi.org\/10.1109\/tits.2025.3586939","relation":{},"ISSN":["1524-9050","1558-0016"],"issn-type":[{"value":"1524-9050","type":"print"},{"value":"1558-0016","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,9]]}}}