{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,27]],"date-time":"2026-03-27T14:29:45Z","timestamp":1774621785785,"version":"3.50.1"},"reference-count":31,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Innovate UK Deepsafe","award":["10063539"],"award-info":[{"award-number":["10063539"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. Lett."],"published-print":{"date-parts":[[2025,3]]},"DOI":"10.1109\/lra.2025.3531146","type":"journal-article","created":{"date-parts":[[2025,1,17]],"date-time":"2025-01-17T18:41:11Z","timestamp":1737139271000},"page":"2478-2485","source":"Crossref","is-referenced-by-count":4,"title":["IntNet: A Communication-Driven Multi-Agent Reinforcement Learning Framework for Cooperative Autonomous Driving"],"prefix":"10.1109","volume":"10","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0571-0178","authenticated-orcid":false,"given":"Leandro","family":"Parada","sequence":"first","affiliation":[{"name":"Centre for Transport Engineering and Modelling, Department of Civil and Environmental Engineering, Imperial College London, London, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-4515-3296","authenticated-orcid":false,"given":"Kevin","family":"Yu","sequence":"additional","affiliation":[{"name":"Centre for Transport Engineering and Modelling, Department of Civil and Environmental Engineering, Imperial College London, London, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6778-8264","authenticated-orcid":false,"given":"Panagiotis","family":"Angeloudis","sequence":"additional","affiliation":[{"name":"Centre for Transport Engineering and Modelling, Department of Civil and Environmental Engineering, Imperial College London, London, U.K."}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/IROS47612.2022.9981319"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2023.3285442"},{"key":"ref3","article-title":"Multi-agent reinforcement learning for networked system control","volume-title":"Proc. 8th Int. Conf. Learn. Representations (ICLR)","author":"Chu","year":"2020"},{"key":"ref4","first-page":"1538","article-title":"TarMAC: Targeted multi-agent communication","volume-title":"Proc. 36th Int. Conf. Mach. Learn.","author":"Das","year":"2019"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/s43684-022-00045-z"},{"key":"ref6","article-title":"A DRL-based multiagent cooperative control framework for CAV networks: A graphic convolution Q network","author":"Dong","year":"2020"},{"key":"ref7","article-title":"Learning to communicate with deep multi-agent reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"29","author":"Foerster","year":"2016"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1049\/itr2.12364"},{"key":"ref9","article-title":"Learning attentional communication for multi-agent cooperation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"31","author":"Jiang","year":"2018"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/S0004-3702(98)00023-X"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.3141\/1999-10"},{"key":"ref12","article-title":"Communication in multi-agent reinforcement learning: Intention sharing","volume-title":"Proc. Int. Conf. Learn. Representations (ICLR)","author":"Kim","year":"2021"},{"key":"ref13","article-title":"An environment for autonomous driving decision-making","author":"Leurent","year":"2018","journal-title":"GitHub Repository"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i05.6211"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/icra.2018.8463189"},{"key":"ref16","article-title":"Multi-agent graph-attention communication and teaming","volume-title":"Proc. 20th Int. Conf. Auton. Agents Multiagent Syst. (AAMAS 2021)","author":"Niu","year":"2021"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.3390\/s23073625"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1080\/23249935.2023.2246586"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC57777.2023.10422217"},{"key":"ref20","article-title":"Safe, Multi-agent, reinforcement learning for autonomous driving","author":"Shalev-Shwartz","year":"2016"},{"key":"ref21","article-title":"Learning when to communicate at scale in multiagent cooperative and competitive tasks","author":"Singh","year":"2018"},{"key":"ref22","article-title":"Learning multiagent communication with backpropagation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"29","author":"Sukhbaatar","year":"2016"},{"key":"ref23","article-title":"Collaborative multiagent decision making for lane-free autonomous driving","volume-title":"Proc. 20th Int. Conf. Auton. Agents Multiagent Syst. (AAMAS 2021)","author":"Troullinos","year":"2021"},{"key":"ref24","first-page":"9908","article-title":"Learning efficient multi-agent communication: An information bottleneck approach","volume-title":"Proc. 37th Int. Conf. Mach. Learn.","author":"Wang","year":"2020"},{"key":"ref25","article-title":"Too many cooks: Coordinating multi-agent collaboration through inverse planning","volume-title":"Proc. Int. Joint Conf. Auton. Agents Multiagent Syst. (AAMAS)","author":"Wang","year":"2020"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1016\/j.trc.2021.103046"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1049\/itr2.12328"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TVT.2020.2997896"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.3390\/s23104710"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1007\/s43684-022-00023-5"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1007\/s10458-024-09644-x"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/7083369\/10849592\/10844516.pdf?arnumber=10844516","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,2,5]],"date-time":"2025-02-05T05:57:16Z","timestamp":1738735036000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10844516\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,3]]},"references-count":31,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/lra.2025.3531146","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"value":"2377-3766","type":"electronic"},{"value":"2377-3774","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,3]]}}}