{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,12]],"date-time":"2026-03-12T07:25:31Z","timestamp":1773300331000,"version":"3.50.1"},"reference-count":31,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,7,8]],"date-time":"2025-07-08T00:00:00Z","timestamp":1751932800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,7,8]],"date-time":"2025-07-08T00:00:00Z","timestamp":1751932800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,7,8]]},"DOI":"10.23919\/acc63710.2025.11107724","type":"proceedings-article","created":{"date-parts":[[2025,8,21]],"date-time":"2025-08-21T18:17:51Z","timestamp":1755800271000},"page":"2998-3003","source":"Crossref","is-referenced-by-count":1,"title":["Enhancing Autonomous Driving Policy Stability through Auxiliary Network in Reinforcement Learning from Human Feedback"],"prefix":"10.23919","author":[{"given":"Hengcong","family":"Guo","sequence":"first","affiliation":[]},{"given":"Junfeng","family":"Zhao","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref1","article-title":"A survey of reinforcement learning from human feedback","author":"Kaufmann","year":"2023"},{"key":"ref2","article-title":"Deep reinforcement learning from human preferences","volume-title":"Advances in neural information processing systems","volume":"30","author":"Christiano"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11757"},{"key":"ref4","article-title":"Interactive learning from policy-dependent human feedback","volume-title":"International Conference on machine learning","author":"MacGlashan"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.4271\/2022-01-0807"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.3390\/electronics13112054"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/s10458-024-09659-4"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ROBIO.2017.8324787"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.23919\/ChiCC.2018.8482790"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/IV51971.2022.9827073"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.energy.2023.129472"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2022.3142822"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.3390\/machines10080609"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i06.6602"},{"key":"ref15","article-title":"Efficient learning of safe driving policy via human-ai copilot optimization","author":"Li","year":"2022"},{"key":"ref16","article-title":"Learning from active human involvement through proxy value propagation","volume-title":"Advances in neural information processing systems","volume":"36","author":"Peng"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1016\/j.commtr.2024.100127"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1016\/j.trc.2025.105262"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1016\/j.eng.2022.05.017"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3314762"},{"key":"ref21","article-title":"Guarded policy optimization with imperfect online demonstrations","author":"Xue","year":"2023"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN52387.2021.9533791"},{"key":"ref23","article-title":"Reinforcement learning: An introduction","author":"Sutton","year":"2018","journal-title":"A Bradford Book"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1145\/203330.203343"},{"key":"ref25","article-title":"Addressing function approximation error in actor-critic methods","volume-title":"International Conference on machine learning","author":"Fujimoto"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.32657\/10356\/90191"},{"key":"ref27","article-title":"CARLA: An open urban driving simulator","volume-title":"Conference on robot learning","author":"Dosovitskiy"},{"issue":"9","key":"ref28","article-title":"Variance Reduction Techniques for Gradient Estimates in Reinforcement Learning","volume":"5","author":"Greensmith","year":"2004","journal-title":"Journal of Machine Learning Research"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1016\/j.ifacol.2025.01.045"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.4271\/2024-01-1981"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/IAVVC57316.2023.10328077"}],"event":{"name":"2025 American Control Conference (ACC)","location":"Denver, CO, USA","start":{"date-parts":[[2025,7,8]]},"end":{"date-parts":[[2025,7,10]]}},"container-title":["2025 American Control Conference (ACC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11107441\/11107442\/11107724.pdf?arnumber=11107724","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T05:53:37Z","timestamp":1755842017000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11107724\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,7,8]]},"references-count":31,"URL":"https:\/\/doi.org\/10.23919\/acc63710.2025.11107724","relation":{},"subject":[],"published":{"date-parts":[[2025,7,8]]}}}