{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,23]],"date-time":"2026-03-23T21:07:52Z","timestamp":1774300072748,"version":"3.50.1"},"reference-count":36,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Major Special Projects in Zhengzhou","award":["2021KJZX0060-5"],"award-info":[{"award-number":["2021KJZX0060-5"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["52502520"],"award-info":[{"award-number":["52502520"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Intell. Transport. Syst."],"published-print":{"date-parts":[[2026,3]]},"DOI":"10.1109\/tits.2026.3656268","type":"journal-article","created":{"date-parts":[[2026,2,12]],"date-time":"2026-02-12T21:01:24Z","timestamp":1770930084000},"page":"2957-2969","source":"Crossref","is-referenced-by-count":0,"title":["Q-Advantage Integrated Human-Guided Reinforcement Learning for Safe End-to-End Autonomous Driving"],"prefix":"10.1109","volume":"27","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-7607-1188","authenticated-orcid":false,"given":"Yong","family":"Wang","sequence":"first","affiliation":[{"name":"Department of Data and Systems Engineering, University of Hong Kong, Hong Kong, China"}]},{"given":"Pei","family":"Wang","sequence":"additional","affiliation":[{"name":"Seres Automobile, Chongqing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2874-1858","authenticated-orcid":false,"given":"Hongwen","family":"He","sequence":"additional","affiliation":[{"name":"School of Mechanical Engineering, Beijing Institute of Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7336-4492","authenticated-orcid":false,"given":"Jingda","family":"Wu","sequence":"additional","affiliation":[{"name":"School of Mechanical Engineering, Beijing Institute of Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4838-7211","authenticated-orcid":false,"given":"Yingjuan","family":"Tang","sequence":"additional","affiliation":[{"name":"School of Mechanical Engineering, Beijing Institute of Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-6871-0205","authenticated-orcid":false,"given":"Zirui","family":"Kuang","sequence":"additional","affiliation":[{"name":"School of Mechanical Engineering, Beijing Institute of Technology, Beijing, 
China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2023.126587"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2024.3474469"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2025.127629"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/IROS47612.2022.9981775"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/JSEN.2024.3382406"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TTE.2025.3563395"},{"key":"ref7","article-title":"End-to-end autonomous driving: Challenges and frontiers","author":"Chen","year":"2023","journal-title":"arXiv:2306.16927"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2022.120563"},{"key":"ref9","volume-title":"Reinforcement Learning: An Introduction","author":"Sutton","year":"2018"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3314762"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3063927"},{"key":"ref12","first-page":"21885","article-title":"Widening the pipeline in human-guided reinforcement learning with explanation and context-aware data augmentation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Guan"},{"key":"ref13","first-page":"6611","article-title":"Guided exploration with proximal policy optimization using a single demonstration","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Libardi"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2022.3187016"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2025.3530143"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2024.3447070"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1016\/j.eng.2022.05.017"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2024.3420959"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8793698"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVCI63518.2024.10830150"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TSMC.2024.3384992"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2025.3583508"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1038\/s41467-025-58192-9"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1016\/j.eng.2024.10.021"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2025.3588176"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TII.2020.3015748"},{"key":"ref27","first-page":"1587","article-title":"Addressing function approximation error in actor-critic methods","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Fujimoto"},{"key":"ref28","article-title":"Trial without error: Towards safe reinforcement learning via human intervention","author":"Saunders","year":"2017","journal-title":"arXiv:1707.05173"},{"key":"ref29","article-title":"Continuous control with deep reinforcement learning","author":"Lillicrap","year":"2015","journal-title":"arXiv:1509.02971"},{"key":"ref30","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja"},{"key":"ref31","first-page":"8011","article-title":"Reward learning from human preferences and demonstrations in atari","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"31","author":"Ibarz"},{"key":"ref32","first-page":"2052","article-title":"Off-policy deep reinforcement learning without exploration","volume-title":"Proc. 36th Int. Conf. Mach. Learn.","volume":"97","author":"Fujimoto"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TTE.2023.3346874"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01871"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1007\/s42154-025-00362-y"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2024.3524609"}],"container-title":["IEEE Transactions on Intelligent Transportation Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6979\/11435208\/11395317.pdf?arnumber=11395317","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,23]],"date-time":"2026-03-23T20:10:36Z","timestamp":1774296636000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11395317\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,3]]},"references-count":36,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/tits.2026.3656268","relation":{},"ISSN":["1524-9050","1558-0016"],"issn-type":[{"value":"1524-9050","type":"print"},{"value":"1558-0016","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,3]]}}}