{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,21]],"date-time":"2026-02-21T18:18:09Z","timestamp":1771697889077,"version":"3.50.1"},"reference-count":49,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2023,3,1]],"date-time":"2023-03-01T00:00:00Z","timestamp":1677628800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2023,3,1]],"date-time":"2023-03-01T00:00:00Z","timestamp":1677628800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,3,1]],"date-time":"2023-03-01T00:00:00Z","timestamp":1677628800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100004405","name":"Toyota Motor Corporation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100004405","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Ontario Centers of Excellence"},{"name":"NSERC Canada"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Intell. Veh."],"published-print":{"date-parts":[[2023,3]]},"DOI":"10.1109\/tiv.2022.3167616","type":"journal-article","created":{"date-parts":[[2022,4,14]],"date-time":"2022-04-14T19:38:20Z","timestamp":1649965100000},"page":"2604-2615","source":"Crossref","is-referenced-by-count":21,"title":["Deep Reinforcement Learning With NMPC Assistance Nash Switching for Urban Autonomous Driving"],"prefix":"10.1109","volume":"8","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5409-7149","authenticated-orcid":false,"given":"Sina","family":"Alighanbari","sequence":"first","affiliation":[{"name":"SHEVS lab at the Department of Systems Design Engineering, University of Waterloo, Waterloo, ON, Canada"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1412-7961","authenticated-orcid":false,"given":"Nasser L.","family":"Azad","sequence":"additional","affiliation":[{"name":"Faculty of Engineering, Department of Systems Design Engineering, University of Waterloo, Waterloo, ON, Canada"}]}],"member":"263","reference":[{"key":"ref13","article-title":"Concrete problems in AI safety","author":"amodei","year":"2016"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11694"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8794446"},{"key":"ref14","article-title":"Prioritized experience replay","author":"schaul","year":"2015"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1146\/annurev-control-053018-023825"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1038\/nature16961"},{"key":"ref17","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2017.8317839"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/BF00993306"},{"key":"ref18","article-title":"Massively parallel methods for deep reinforcement learning","author":"nair","year":"2015"},{"key":"ref46","first-page":"1039","article-title":"Nash Q-learning for general-sum stochastic games","volume":"4","author":"hu","year":"2003","journal-title":"J Mach Learn Res"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CAVS51000.2020.9334596"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1996.8.3.643"},{"key":"ref47","first-page":"1339","article-title":"Gradient descent learns one-hidden-layer CNN: Don’t be afraid of spurious local minima","author":"du","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref42","article-title":"Safe exploration of nonlinear dynamical systems: A predictive safety filter for reinforcement learning","author":"wabersich","year":"2018"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2019.2913768"},{"key":"ref44","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2018.2876389"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3119915"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8461233"},{"key":"ref7","first-page":"4742","article-title":"Structured control nets for deep reinforcement learning","author":"srouji","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref9","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/j.trc.2020.102662"},{"key":"ref3","author":"sutton","year":"2018","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TIV.2019.2919467"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TIV.2019.2955905"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2016.7487175"},{"key":"ref35","article-title":"Ecological control and coordination of connected and automated PHEVs at roundabouts under uncertainty","author":"alighanbari","year":"2019"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TIV.2019.2955362"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-11021-5_35"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2018.00062"},{"key":"ref31","article-title":"Affordance-based reinforcement learning for urban driving","author":"agarwal","year":"2021"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/WACVW52041.2021.00020"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/IVS.2019.8813769"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8461203"},{"key":"ref2","volume":"135","author":"sutton","year":"1998","journal-title":"Introduction to Reinforcement Learning"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2021.3054625"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TIV.2016.2578706"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TIV.2017.2749181"},{"key":"ref24","first-page":"1633","article-title":"Transfer learning for reinforcement learning domains: A survey","volume":"10","author":"taylor","year":"2009","journal-title":"J Mach Learn Res"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01234-2_36"},{"key":"ref26","first-page":"1126","article-title":"Model-agnostic meta-learning for fast adaptation of deep networks","author":"finn","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref25","article-title":"Learning to reinforcement learn","author":"wang","year":"2016"},{"key":"ref20","article-title":"Reinforcement learning from imperfect demonstrations","author":"gao","year":"2018"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11757"},{"key":"ref21","article-title":"Learning complex dexterous manipulation with deep reinforcement learning and demonstrations","author":"rajeswaran","year":"2017"},{"key":"ref28","article-title":"Progressive neural networks","author":"rusu","year":"2016"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8206049"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00718"}],"container-title":["IEEE Transactions on Intelligent Vehicles"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7274857\/10109992\/09757810.pdf?arnumber=9757810","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,5,15]],"date-time":"2023-05-15T18:53:31Z","timestamp":1684176811000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9757810\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,3]]},"references-count":49,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/tiv.2022.3167616","relation":{},"ISSN":["2379-8904","2379-8858"],"issn-type":[{"value":"2379-8904","type":"electronic"},{"value":"2379-8858","type":"print"}],"subject":[],"published":{"date-parts":[[2023,3]]}}}