{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T20:27:16Z","timestamp":1776889636845,"version":"3.51.2"},"reference-count":68,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"6","license":[{"start":{"date-parts":[[2024,6,1]],"date-time":"2024-06-01T00:00:00Z","timestamp":1717200000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,6,1]],"date-time":"2024-06-01T00:00:00Z","timestamp":1717200000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,6,1]],"date-time":"2024-06-01T00:00:00Z","timestamp":1717200000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100004731","name":"Natural Science Foundation of Zhejiang Province","doi-asserted-by":"publisher","award":["LR23E080002"],"award-info":[{"award-number":["LR23E080002"]}],"id":[{"id":"10.13039\/501100004731","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["72361137006"],"award-info":[{"award-number":["72361137006"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Alibaba-Zhejiang University Joint Research Institute of Frontier Technologies"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Intell. Transport. Syst."],"published-print":{"date-parts":[[2024,6]]},"DOI":"10.1109\/tits.2023.3344585","type":"journal-article","created":{"date-parts":[[2024,1,2]],"date-time":"2024-01-02T21:26:05Z","timestamp":1704230765000},"page":"5225-5241","source":"Crossref","is-referenced-by-count":28,"title":["Reinforcement Learning for Traffic Signal Control in Hybrid Action Space"],"prefix":"10.1109","volume":"25","author":[{"ORCID":"https:\/\/orcid.org\/0009-0009-2828-1979","authenticated-orcid":false,"given":"Haoqing","family":"Luo","sequence":"first","affiliation":[{"name":"Institute of Intelligent Transportation Systems, College of Civil Engineering and Architecture, and the Center for Balance Architecture, Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4651-1570","authenticated-orcid":false,"given":"Yiming","family":"Bie","sequence":"additional","affiliation":[{"name":"School of Transportation, Jilin University, Changchun, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6110-0783","authenticated-orcid":false,"given":"Sheng","family":"Jin","sequence":"additional","affiliation":[{"name":"Institute of Intelligent Transportation Systems, College of Civil Engineering and Architecture, and the Center for Balance Architecture, Zhejiang University, Hangzhou, China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Traffic signal settings","author":"Webster","year":"1958"},{"key":"ref2","first-page":"200","article-title":"A computer control system for traffic networks","volume-title":"Proc. 2nd Int. Symp. Theory Traffic Flow","author":"Miller"},{"key":"ref3","first-page":"8","article-title":"\u2018Transyt\u2019 method for area traffic control","volume":"11","author":"Robertson","year":"1969","journal-title":"Traffic Eng. control"},{"issue":"4","key":"ref4","first-page":"190","article-title":"The SCOOT on-line traffic signal optimisation technique","volume":"23","author":"Hunt","year":"1982","journal-title":"Traffic Eng. Control"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1016\/S1474-6670(17)52677-4"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/S0967-0661(01)00121-6"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref8","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv:1707.06347"},{"key":"ref9","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. ICML","volume":"80","author":"Haarnoja"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/j.aei.2018.08.002"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/3357384.3357900"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1512\/iumj.1957.6.56038"},{"key":"ref13","article-title":"Parametrized deep Q-networks learning: Reinforcement learning with discrete-continuous hybrid action space","author":"Xiong","year":"2018","journal-title":"arXiv:1810.06394"},{"key":"ref14","first-page":"I-387","article-title":"Deterministic policy gradient algorithms","volume-title":"Proc. 31st Int. Conf. Int. Conf. Mach. Learn.","author":"Silver"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10226"},{"key":"ref16","article-title":"Multi-pass Q-networks for deep reinforcement learning with parameterised action spaces","author":"Bester","year":"2019","journal-title":"arXiv:1905.04388"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/316"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2003.819606"},{"key":"ref19","first-page":"58","article-title":"SIGSET: A computer program for calculating traffic capacity of signal-controlled road junctions","volume":"12","author":"Allsop","year":"1971","journal-title":"Traffic Eng. Control"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1016\/0191-2615(84)90028-6"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/MIS.2005.15"},{"key":"ref22","first-page":"221","article-title":"The oversaturated intersection","volume-title":"Proc. 2nd Int. Symp. Theory Road Traffic Flow","author":"Gazis"},{"key":"ref23","article-title":"Learning from delayed rewards","author":"Watkins","year":"1989"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1061\/(asce)0733-947x(2003)129:3(278)"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2011.6082823"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/JAS.2016.7508798"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10295"},{"key":"ref28","first-page":"1995","article-title":"Dueling network architectures for deep reinforcement learning","volume-title":"Proc. 33rd Int. Conf. Mach. Learn.","volume":"48","author":"Wang"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TVT.2018.2890726"},{"key":"ref30","article-title":"Distributed prioritized experience replay","author":"Horgan","year":"2018","journal-title":"arXiv:1803.00933"},{"key":"ref31","article-title":"Deep deterministic policy gradient for urban traffic light control","author":"Casas","year":"2017","journal-title":"arXiv:1703.09035"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1049\/iet-its.2017.0153"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1145\/3357384.3358079"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1016\/j.trc.2022.103728"},{"key":"ref35","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"48","author":"Mnih"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1145\/3357384.3357902"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2022.3150977"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2020.2987917"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2019.2901791"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1016\/j.commtr.2022.100068"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1108\/JICV-08-2021-0015"},{"key":"ref42","first-page":"21","article-title":"Coordinated deep reinforcement learners for traffic light control","volume-title":"Proc. Learn., Inference Control Multi-Agent Syst.","volume":"1","author":"Van der Pol"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1016\/j.artint.2021.103535"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2013.2255286"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2019.2958859"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1108\/JICV-06-2022-0023"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1016\/j.trc.2017.09.020"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1145\/3219819.3220096"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-25808-9_4"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1145\/3292500.3330949"},{"key":"ref51","first-page":"4079","article-title":"AttendLight: Universal attention-based reinforcement learning model for traffic signal control","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Oroojlooy"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1016\/j.sysarc.2021.102374"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.3390\/s21072302"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-28929-8"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1613\/jair.1.11418"},{"key":"ref56","article-title":"High-dimensional continuous control using generalized advantage estimation","author":"Schulman","year":"2015","journal-title":"arXiv:1506.02438"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.2307\/1913641"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/TNN.1998.712192"},{"key":"ref59","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2014","journal-title":"arXiv:1412.6980"},{"key":"ref60","first-page":"4385","article-title":"Asynchronous actor-critic for multi-agent reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Xiao"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA46639.2022.9811771"},{"key":"ref62","first-page":"1107","article-title":"Asynchronous multi-agent reinforcement learning for efficient real-time multi-robot cooperative exploration","volume-title":"Proc. Int. Conf. Auto. Agents Multiagent Syst.","author":"Yu"},{"key":"ref63","article-title":"Is independent learning all you need in the StarCraft multi-agent challenge?","author":"de Witt","year":"2020","journal-title":"arXiv:2011.09533"},{"key":"ref64","article-title":"Policy regularization via noisy advantage values for cooperative multi-agent actor-critic methods","author":"Hu","year":"2021","journal-title":"arXiv:2106.14334"},{"key":"ref65","first-page":"24611","article-title":"The surprising effectiveness of PPO in cooperative multi-agent games","volume-title":"Advances in Neural Information Processing Systems","volume":"35","author":"Yu","year":"2022"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2018.8569938"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1063\/pt.3.2314"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1016\/j.trc.2013.08.014"}],"container-title":["IEEE Transactions on Intelligent Transportation Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6979\/10543072\/10379485.pdf?arnumber=10379485","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,3]],"date-time":"2024-09-03T04:41:04Z","timestamp":1725338464000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10379485\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,6]]},"references-count":68,"journal-issue":{"issue":"6"},"URL":"https:\/\/doi.org\/10.1109\/tits.2023.3344585","relation":{},"ISSN":["1524-9050","1558-0016"],"issn-type":[{"value":"1524-9050","type":"print"},{"value":"1558-0016","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,6]]}}}