{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,6]],"date-time":"2026-02-06T06:12:10Z","timestamp":1770358330919,"version":"3.49.0"},"reference-count":34,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,7,18]],"date-time":"2021-07-18T00:00:00Z","timestamp":1626566400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,7,18]],"date-time":"2021-07-18T00:00:00Z","timestamp":1626566400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,7,18]],"date-time":"2021-07-18T00:00:00Z","timestamp":1626566400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,7,18]]},"DOI":"10.1109\/ijcnn52387.2021.9534349","type":"proceedings-article","created":{"date-parts":[[2021,9,23]],"date-time":"2021-09-23T22:32:08Z","timestamp":1632436328000},"page":"1-8","source":"Crossref","is-referenced-by-count":14,"title":["Hierarchical Reinforcement Learning-Based Policy Switching Towards Multi-Scenarios Autonomous Driving"],"prefix":"10.1109","author":[{"given":"Youtian","family":"Guo","sequence":"first","affiliation":[]},{"given":"Qichao","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Junjie","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Shasha","family":"Liu","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref33","author":"eysenbach","year":"2019","journal-title":"Diversity is all you need Learning skills without a reward function"},{"key":"ref32","author":"osa","year":"2019","journal-title":"Hierarchical reinforcement learning via advantage-weighted information maximization"},{"key":"ref31","article-title":"SMARTS:scalable multi-agent reinforcement learning training school for autonomous driving","author":"zhou","year":"2020","journal-title":"Conference on Robot Learning November"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33015789"},{"key":"ref34","first-page":"2579","article-title":"Visualizing data using t-sne","author":"van der maaten","year":"2008","journal-title":"Journal of Machine Learning Research"},{"key":"ref10","first-page":"1737","article-title":"Learning parameterized skills","author":"da silva","year":"2014","journal-title":"Int'l Conference on Machine Learning"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2014.6907421"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8460487"},{"key":"ref13","first-page":"3682","article-title":"Hierarchical deep reinforcement learning: integrating temporal abstraction and intrinsic motivation","author":"kulkarni","year":"2016","journal-title":"the International Conference on Neural Information Processing Systems"},{"key":"ref14","first-page":"3307","article-title":"Data-efficient hierarchical reinforcement learning","author":"nachum","year":"2018","journal-title":"the International Conference on Neural Information Processing Systems"},{"key":"ref15","first-page":"359","article-title":"Urban driving with multi-objective deep reinforcement learning","author":"li","year":"2019","journal-title":"International Conference on Autonomous Agents and Multiagent 
Systems"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/IVS.2019.8813803"},{"key":"ref17","first-page":"7151","article-title":"End-to-end model-free reinforcement learning for urban driving using implicit affordances","author":"marin","year":"2020","journal-title":"IEEE Conf Computer Vision and Pattern Recognition"},{"key":"ref18","author":"shao","year":"2019","journal-title":"A Survey of Deep Reinforcement Learning in Video Games"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992696"},{"key":"ref28","article-title":"The option-critic architecture","volume":"31","author":"bacon","year":"2017","journal-title":"Association for the Advancement of Artificial Intelligence"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2019.8852110"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1016\/S0004-3702(99)00052-1"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2019.8851766"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2019.8851766"},{"key":"ref29","article-title":"Learnings options end-to-end for continuous action tasks","author":"klissarov","year":"2017","journal-title":"Neural Information Processing Systems"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/SSCI44817.2019.9003029"},{"key":"ref8","author":"tidd","year":"2020","journal-title":"Learning when to switch composing controllers to traverse a sequence of terrain artifacts"},{"key":"ref7","first-page":"1563","article-title":"Tactical decision-making in autonomous driving by reinforcement learning with uncertainty estimation","author":"hoel","year":"2020","journal-title":"IEEE Intelligent Vehicles Symposium"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8793742"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2020.XVI.039"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/MCI.2019.2901089"},{"key":"ref20","first-page":"1889","article-title":"Trust region policy optimization","author":"schulman","year":"2015","journal-title":"Int'l Conference on Machine Learning"},{"key":"ref22","author":"schulman","year":"2016","journal-title":"High-dimensional continuous control using generalized advantage estimation"},{"key":"ref21","author":"schulman","year":"2017","journal-title":"Proximal policy optimization algorithms"},{"key":"ref24","first-page":"118","article-title":"The maxq method for hierarchical reinforcement learning","author":"dietterich","year":"1998","journal-title":"International Conference on Machine Learning"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.12785\/ijcds\/040207"},{"key":"ref26","first-page":"1043","article-title":"Reinforcement learning with hierarchies of machines","author":"parr","year":"1998","journal-title":"Advances in neural information processing systems"},{"key":"ref25","first-page":"271","article-title":"Feudal reinforcement learning","author":"dayan","year":"1992","journal-title":"Neural Information Processing Systems"}],"event":{"name":"2021 International Joint Conference on Neural Networks (IJCNN)","location":"Shenzhen, China","start":{"date-parts":[[2021,7,18]]},"end":{"date-parts":[[2021,7,22]]}},"container-title":["2021 International Joint Conference on Neural Networks 
(IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9533266\/9533267\/09534349.pdf?arnumber=9534349","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T15:46:02Z","timestamp":1652197562000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9534349\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,7,18]]},"references-count":34,"URL":"https:\/\/doi.org\/10.1109\/ijcnn52387.2021.9534349","relation":{},"subject":[],"published":{"date-parts":[[2021,7,18]]}}}