{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,8]],"date-time":"2026-05-08T16:35:52Z","timestamp":1778258152728,"version":"3.51.4"},"reference-count":61,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. Intell."],"published-print":{"date-parts":[[2022]]},"DOI":"10.1109\/tpami.2022.3190471","type":"journal-article","created":{"date-parts":[[2022,7,13]],"date-time":"2022-07-13T19:38:12Z","timestamp":1657741092000},"page":"1-14","source":"Crossref","is-referenced-by-count":156,"title":["MetaDrive: Composing Diverse Driving Scenarios for Generalizable Reinforcement Learning"],"prefix":"10.1109","author":[{"given":"Quanyi","family":"Li","sequence":"first","affiliation":[{"name":"Centre for Perceptual, Interactive Intelligence, China"}]},{"given":"Zhenghao","family":"Peng","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong, China"}]},{"given":"Lan","family":"Feng","sequence":"additional","affiliation":[{"name":"ETH Zurich, Switzerland"}]},{"given":"Qihang","family":"Zhang","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong, China"}]},{"given":"Zhenghai","family":"Xue","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4030-0684","authenticated-orcid":false,"given":"Bolei","family":"Zhou","sequence":"additional","affiliation":[{"name":"University of California, Los Angeles, USA"}]}],"member":"263","reference":[{"key":"ref1","first-page":"22","article-title":"Constrained policy optimization","volume-title":"Proc. 34th Int. Conf. Mach. Learn.","author":"Achiam"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1177\/0278364919887447"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1613\/jair.3912"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA40945.2020.9197228"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00895"},{"key":"ref6","article-title":"Duckietown environments for openai gym","author":"Chevalier-Boisvert","year":"2018"},{"key":"ref7","article-title":"Leveraging procedural generation to benchmark reinforcement learning","author":"Cobbe","year":"2019"},{"key":"ref8","first-page":"1282","article-title":"Quantifying generalization in reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Cobbe"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-021-04301-9"},{"key":"ref10","first-page":"1","article-title":"CARLA: An open urban driving simulator","volume-title":"Proc. 1st Annu. Conf. Robot Learn.","author":"Dosovitskiy"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/IVS.2019.8813885"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/MC.2004.180"},{"key":"ref13","first-page":"1110","article-title":"Learning to walk in the real world with minimal human effort","author":"Ha","year":"2021","journal-title":"Proc. Conf. Robot Learn."},{"key":"ref14","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/MPRV.2008.80"},{"key":"ref16","first-page":"1","article-title":"Learning a decision module by imitating drivers control behaviors","author":"Huang","year":"2021","journal-title":"Proc. Conf. Robot Learn."},{"key":"ref17","article-title":"Intrinsic social motivation via causal influence in multi-agent RL","author":"Jaques","year":"2018"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8793742"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.3141\/1999-10"},{"key":"ref20","article-title":"DriverGym: Democratising reinforcement learning for autonomous driving","author":"Kothari","year":"2021"},{"key":"ref21","article-title":"Conservative Q-learning for offline reinforcement learning","author":"Kumar","year":"2020"},{"key":"ref22","article-title":"An environment for autonomous driving decision-making","author":"Leurent","year":"2018"},{"key":"ref23","first-page":"455","article-title":"iGibson 2.0: Object-centric simulation for robot learning of everyday household tasks","author":"Li","year":"2022","journal-title":"Proc. Conf. Robot Learn."},{"key":"ref24","article-title":"Efficient learning of safe driving policy via human-AI copilot optimization","volume-title":"Proc. Int. Conf. Representation Learn.","author":"Li"},{"key":"ref25","first-page":"3053","article-title":"RLlib: Abstractions for distributed reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Liang"},{"key":"ref26","article-title":"Waymo open dataset: An autonomous driving dataset","year":"2019"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2018.8569938"},{"key":"ref28","article-title":"Beyond grand theft auto v for training, testing and enhancing deep learning in self driving cars","author":"Martinez","year":"2017"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1016\/j.compeleceng.2019.07.019"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11492"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-018-1073-7"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2017\/757"},{"key":"ref33","article-title":"Emergent road rules in multi-agent driving environments","author":"Pal","year":"2020"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN48605.2020.9207663"},{"key":"ref35","first-page":"10784","article-title":"Learning to simulate self-driven particles system with coordinated policy optimization","volume-title":"Proc. Adv. Neural Informat. Process. Syst.","volume":"34","author":"Peng"},{"key":"ref36","first-page":"1554","article-title":"Safe driving via expert guided policy optimization","volume-title":"Proc. Conf. Robot Learn.","author":"Peng"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/IVS.2017.7995816"},{"issue":"268","key":"ref38","first-page":"1","article-title":"Stable-baselines3: Reliable reinforcement learning implementations","volume":"22","author":"Raffin","year":"2021","journal-title":"J. Mach. Learn. Res."},{"key":"ref39","article-title":"Benchmarking safe exploration in deep reinforcement learning","author":"Ray","year":"2019"},{"key":"ref40","article-title":"Open-sourced reinforcement learning environments for surgical robotics","author":"Richter","year":"2019"},{"key":"ref41","article-title":"The starcraft multi-agent challenge","author":"Samvelyan","year":"2019"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1613\/jair.1.12531"},{"key":"ref43","article-title":"Is independent learning all you need in the starcraft multi-agent challenge?","author":"de Witt","year":"2020"},{"key":"ref44","article-title":"High-dimensional continuous control using generalized advantage estimation","author":"Schulman","year":"2015"},{"key":"ref45","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1504\/IJVP.2019.097096"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-67361-5_40"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1038\/nature16961"},{"key":"ref49","first-page":"9133","article-title":"Responsive safety in reinforcement learning by PID lagrangian methods","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Stooke"},{"key":"ref50","first-page":"21","article-title":"Neuro-symbolic program search for autonomous driving decision module design","volume-title":"Proc. Conf. Robot Learn.","author":"Sun"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00252"},{"key":"ref52","article-title":"Deepdrive: A simulator that allows anyone with a PC to push the state-of-the-art in self-driving"},{"key":"ref53","article-title":"Udacitys self-driving car simulator: A self-driving car simulator built with unity"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"key":"ref55","first-page":"399","article-title":"Benchmarks for reinforcement learning in mixed-autonomy traffic","volume-title":"Proc. Conf. Robot Learn.","author":"Vinitsky"},{"key":"ref56","article-title":"Flow: Architecture and benchmarking for reinforcement learning in traffic control","author":"Wu","year":"2017"},{"key":"ref57","article-title":"TORCS, the open racing car simulator","author":"Wymann","year":"2000"},{"key":"ref58","first-page":"5571","article-title":"Mean field multi-agent reinforcement learning","volume-title":"Proc. 35th Int. Conf. Mach. Learn.","author":"Yang"},{"key":"ref59","first-page":"1094","article-title":"Meta-world: A benchmark and evaluation for multi-task and meta reinforcement learning","volume-title":"Proc. Conf. Robot Learn.","author":"Yu"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1145\/3308558.3314139"},{"key":"ref61","first-page":"264","article-title":"SMARTS: Scalable multi-agent reinforcement learning training school for autonomous driving","volume":"155","author":"Zhou","year":"2020"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/34\/4359286\/09829243.pdf?arnumber=9829243","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,1]],"date-time":"2024-02-01T06:14:04Z","timestamp":1706768044000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9829243\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"references-count":61,"URL":"https:\/\/doi.org\/10.1109\/tpami.2022.3190471","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"value":"0162-8828","type":"print"},{"value":"2160-9292","type":"electronic"},{"value":"1939-3539","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]}}}