{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,7]],"date-time":"2025-08-07T20:38:07Z","timestamp":1754599087334,"version":"3.28.0"},"reference-count":31,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,6,2]],"date-time":"2024-06-02T00:00:00Z","timestamp":1717286400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,6,2]],"date-time":"2024-06-02T00:00:00Z","timestamp":1717286400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,6,2]]},"DOI":"10.1109\/iv55156.2024.10588785","type":"proceedings-article","created":{"date-parts":[[2024,7,15]],"date-time":"2024-07-15T17:19:28Z","timestamp":1721063968000},"page":"2345-2352","source":"Crossref","is-referenced-by-count":2,"title":["A Pseudo-Hierarchical Planning Framework with Dynamic-Aware Reinforcement Learning for Autonomous Driving"],"prefix":"10.1109","author":[{"given":"Qi","family":"Deng","sequence":"first","affiliation":[{"name":"Inspur (Beijing) Electronic Information Industry Co., Ltd,Beijing,China,100085"}]},{"given":"Yaqian","family":"Zhao","sequence":"additional","affiliation":[{"name":"Inspur (Beijing) Electronic Information Industry Co., Ltd,Beijing,China,100085"}]},{"given":"Rengang","family":"Li","sequence":"additional","affiliation":[{"name":"Inspur (Beijing) Electronic Information Industry Co., Ltd,Beijing,China,100085"}]},{"given":"Qifu","family":"Hu","sequence":"additional","affiliation":[{"name":"Inspur (Beijing) Electronic Information Industry Co., Ltd,Beijing,China,100085"}]},{"given":"Tengfei","family":"Zhang","sequence":"additional","affiliation":[{"name":"Inspur (Beijing) Electronic Information Industry Co., Ltd,Beijing,China,100085"}]},{"given":"Heng","family":"Zhang","sequence":"additional","affiliation":[{"name":"Inspur (Beijing) Electronic Information Industry Co., Ltd,Beijing,China,100085"}]},{"given":"Ruyang","family":"Li","sequence":"additional","affiliation":[{"name":"Inspur (Beijing) Electronic Information Industry Co., Ltd,Beijing,China,100085"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2020.3046646"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01494"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2023.XIX.102"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3171915"},{"key":"ref5","article-title":"Unsupervised skill discovery via recurrent skill training","author":"Jiang","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/IV47402.2020.9304744"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2021.3115235"},{"key":"ref8","doi-asserted-by":"crossref","first-page":"303","DOI":"10.1007\/978-981-99-8076-5_22","article-title":"Pnp: Integrated prediction and planning for interactive lane change in dense traffic","volume-title":"30th International Conference on Neural Information Processing (ICONIP)","volume":"14452","author":"Liu"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/itsc57777.2023.10422695"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.2352\/ISSN.2470-1173.2017.19.AVM-023"},{"key":"ref11","first-page":"16691","article-title":"Recurrent model-free RL can be a strong baseline for many pomdps","volume-title":"2022 International Conference on Machine Learning (ICML)","volume":"162","author":"Ni"},{"key":"ref12","first-page":"1","article-title":"Context-aware metarl with two-stage constrained adaptation for urban driving","author":"Deng","year":"2023","journal-title":"IEEE Transactions on Vehicular Technology"},{"article-title":"Meta-q-learning","volume-title":"8th International Conference on Learning Representations (ICLR)","author":"Fakoor","key":"ref13"},{"key":"ref14","first-page":"7487","article-title":"Stabilizing transformers for reinforcement learning","volume-title":"Proceedings of the 37th International Conference on Machine Learning (ICML)","volume":"119","author":"Parisotto"},{"key":"ref15","first-page":"4351","article-title":"Been there, done that: Meta-learning with episodic recall","volume-title":"Proceedings of the 35th International Conference on Machine Learning (ICML)","volume":"80","author":"Ritter"},{"key":"ref16","first-page":"188","article-title":"Accelerating reinforcement learning with learned skill priors","volume-title":"4th Conference on Robot Learning (CoRL)","volume":"155","author":"Pertsch"},{"article-title":"Learning transferable motor skills with hierarchical latent mixture policies","volume-title":"10th International Conference on Learning Representations (ICLR)","author":"Rao","key":"ref17"},{"key":"ref18","first-page":"21847","article-title":"Accelerating robotic reinforcement learning via parameterized action primitives","author":"Dalal","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1177\/02783649211004615"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/IROS55552.2023.10341449"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/IROS40897.2019.8968201"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/IROS40897.2019.8968560"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2023.3269533"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3134249"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2022.3225721"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2019.2891792"},{"key":"ref27","first-page":"5331","article-title":"Efficient offpolicy meta-reinforcement learning via probabilistic context variables","volume-title":"Proceedings of the 36th International Conference on Machine Learning (ICML)","volume":"97","author":"Rakelly"},{"article-title":"Varibad: A very good method for bayes-adaptive deep RL via meta-learning","volume-title":"8th International Conference on Learning Representations (ICLR)","author":"Zintgraf","key":"ref28"},{"key":"ref29","volume":"abs\/1707.06347","author":"Schulman","year":"2017","journal-title":"Proximal policy optimization algorithms"},{"key":"ref30","first-page":"1856","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proceedings of the 35th International Conference on Machine Learning (ICML)","volume":"80","author":"Haarnoja"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3190471"}],"event":{"name":"2024 IEEE Intelligent Vehicle Symposium (IV)","start":{"date-parts":[[2024,6,2]]},"location":"Jeju Island, Korea, Republic of","end":{"date-parts":[[2024,6,5]]}},"container-title":["2024 IEEE Intelligent Vehicles Symposium (IV)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10587320\/10588370\/10588785.pdf?arnumber=10588785","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,7,19]],"date-time":"2024-07-19T05:03:56Z","timestamp":1721365436000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10588785\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,6,2]]},"references-count":31,"URL":"https:\/\/doi.org\/10.1109\/iv55156.2024.10588785","relation":{},"subject":[],"published":{"date-parts":[[2024,6,2]]}}}