{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,22]],"date-time":"2025-03-22T04:20:25Z","timestamp":1742617225581,"version":"3.40.2"},"reference-count":15,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,9,24]],"date-time":"2024-09-24T00:00:00Z","timestamp":1727136000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,9,24]],"date-time":"2024-09-24T00:00:00Z","timestamp":1727136000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,9,24]]},"DOI":"10.1109\/itsc58415.2024.10920085","type":"proceedings-article","created":{"date-parts":[[2025,3,21]],"date-time":"2025-03-21T19:00:11Z","timestamp":1742583611000},"page":"1932-1937","source":"Crossref","is-referenced-by-count":0,"title":["Efficient Replay Memory Architectures in Multi-Agent Reinforcement Learning for Traffic Congestion Control"],"prefix":"10.1109","author":[{"given":"Mukul","family":"Chodhary","sequence":"first","affiliation":[{"name":"University of Melbourne,Department of Electrical and Electronic Engineering,Australia"}]},{"given":"Kevin","family":"Octavian","sequence":"additional","affiliation":[{"name":"School of Electrical Engineering, KAIST,S. Korea"}]},{"given":"SooJean","family":"Han","sequence":"additional","affiliation":[{"name":"School of Electrical Engineering, KAIST,S. Korea"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref2","article-title":"Soft Actor-Critic: Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor","author":"Haarnoja","year":"2018","journal-title":"ArXiv preprint"},{"key":"ref3","first-page":"3061","article-title":"Revisiting fundamentals of experience replay","volume-title":"Proceedings of the 37th International Conference on Machine Learning (ICML)","volume":"119","author":"Fedus","year":"2020"},{"key":"ref4","first-page":"2827","article-title":"Neural Episodic Control","volume-title":"Proceedings of the 34th International Conference on Machine Learning (ICML)","volume":"70","author":"Pritzel","year":"2017"},{"key":"ref5","first-page":"903","article-title":"Congestion control of vehicle traffic networks by learning structural and temporal patterns","volume-title":"Proceedings of The 5th Annual Learning for Dynamics and Control Conference (L4DC)","volume":"211","author":"Han","year":"2023"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1146\/annurev-psych-122414-033625"},{"key":"ref7","first-page":"2990","article-title":"Group equivariant convolutional networks","volume-title":"Proceedings of The 33rd International Conference on Machine Learning (ICML)","volume":"48","author":"Cohen","year":"2016"},{"key":"ref8","article-title":"Relational inductive biases, deep learning, and graph networks","author":"Battaglia","year":"2018","journal-title":"ArXiv preprint"},{"key":"ref9","article-title":"Efficient Replay Memory Architectures in Multi-Agent Reinforcement Learning for Traffic Congestion Control","author":"Chodhary","year":"2024","journal-title":"ArXiv preprint"},{"key":"ref10","first-page":"1","article-title":"Playing Atari with Deep Reinforcement Learning","volume-title":"NIPS Workshop on Deep Learning","author":"Mnih","year":"2013"},{"key":"ref11","first-page":"1","article-title":"Hippocampal contributions to control: The third way","volume-title":"Advances in Neural Information Processing Systems (NeurIPS)","volume":"20","author":"Lengyel","year":"2007"},{"key":"ref12","article-title":"Model-Free Episodic Control","author":"Blundell","year":"2016","journal-title":"ArXiv preprint"},{"key":"ref13","first-page":"2827","article-title":"Neural Episodic Control","volume-title":"Proceedings of the 34th International Conference on Machine Learning (ICML)","author":"Pritzel","year":"2017"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/tnn.1998.712192"},{"key":"ref15","article-title":"Is independent learning all you need in the starcraft multi-agent challenge?","author":"de Witt","year":"2020","journal-title":"ArXiv preprint"}],"event":{"name":"2024 IEEE 27th International Conference on Intelligent Transportation Systems (ITSC)","start":{"date-parts":[[2024,9,24]]},"location":"Edmonton, AB, Canada","end":{"date-parts":[[2024,9,27]]}},"container-title":["2024 IEEE 27th International Conference on Intelligent Transportation Systems (ITSC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10919469\/10919190\/10920085.pdf?arnumber=10920085","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,3,21]],"date-time":"2025-03-21T23:18:11Z","timestamp":1742599091000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10920085\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,9,24]]},"references-count":15,"URL":"https:\/\/doi.org\/10.1109\/itsc58415.2024.10920085","relation":{},"subject":[],"published":{"date-parts":[[2024,9,24]]}}}