{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,17]],"date-time":"2026-03-17T19:32:45Z","timestamp":1773775965265,"version":"3.50.1"},"reference-count":25,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,10,19]],"date-time":"2020-10-19T00:00:00Z","timestamp":1603065600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,10,19]],"date-time":"2020-10-19T00:00:00Z","timestamp":1603065600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,10,19]],"date-time":"2020-10-19T00:00:00Z","timestamp":1603065600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,10,19]]},"DOI":"10.1109\/iv47402.2020.9304668","type":"proceedings-article","created":{"date-parts":[[2021,1,10]],"date-time":"2021-01-10T07:14:14Z","timestamp":1610262854000},"page":"1746-1752","source":"Crossref","is-referenced-by-count":108,"title":["Automated Lane Change Strategy using Proximal Policy Optimization-based Deep Reinforcement Learning"],"prefix":"10.1109","author":[{"given":"Fei","family":"Ye","sequence":"first","affiliation":[]},{"given":"Xuxin","family":"Cheng","sequence":"additional","affiliation":[]},{"given":"Pin","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Ching-Yao","family":"Chan","sequence":"additional","affiliation":[]},{"given":"Jiucai","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","author":"wang","year":"2019","journal-title":"Cooperative lane changing via deep reinforcement learning"},{"key":"ref11","article-title":"Tactical decision making for lane changing with deep reinforcement learning","author":"mukadam","year":"0","journal-title":"Proc NIPS Workshop Mach Learn Intell Transp Syst"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/IVS.2018.8500556"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2019.8917392"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2018.8569448"},{"key":"ref15","first-page":"1","article-title":"A reinforcement learning approach to autonomous decision making of intelligent vehicles on highways","author":"xu","year":"2018","journal-title":"IEEE Trans Syst Man Cybern Syst"},{"key":"ref16","author":"alshiekh","year":"2017","journal-title":"Safe reinforcement learning via shielding"},{"key":"ref17","author":"schulman","year":"2017","journal-title":"Proximal policy optimization algorithms"},{"key":"ref18","first-page":"1889","article-title":"Trust region policy optimization","author":"schulman","year":"0","journal-title":"Proc Int Conf Mach Learn (ICML)"},{"key":"ref19","author":"saunders","year":"2017","journal-title":"Trial without Error Towards Safe Reinforcement Learning via Human Intervention"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/IVS.2019.8813903"},{"key":"ref3","author":"mnih","year":"2013","journal-title":"Playing atari with deep reinforcement learning"},{"key":"ref6","article-title":"Quadratic Q-network for learning continuous control for autonomous vehicles","author":"wang","year":"0","journal-title":"NIPS Workshop Mach Learn Auton 
Drivingv"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.2352\/ISSN.2470-1173.2017.19.AVM-023"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/j.trc.2018.06.007"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2012.2187447"},{"key":"ref2","first-page":"49","article-title":"Guided cost learning: Deep inverse optimal control via policy optimization","author":"finn","year":"0","journal-title":"International Conference on Machine Learning"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2018.8569340"},{"key":"ref1","author":"bojarski","year":"2016","journal-title":"End to End Learning for Self-Driving Cars"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992696"},{"key":"ref22","first-page":"63","author":"behrisch","year":"2011","journal-title":"SUMOSimulation of Urban MObility-An Overview"},{"key":"ref21","first-page":"267","article-title":"Approximately optimal approximate reinforcement learning","volume":"2","author":"kakade","year":"0","journal-title":"ICML"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2018.8569568"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1103\/PhysRevE.62.1805"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2018.8569612"}],"event":{"name":"2020 IEEE Intelligent Vehicles Symposium (IV)","location":"Las Vegas, NV, USA","start":{"date-parts":[[2020,10,19]]},"end":{"date-parts":[[2020,11,13]]}},"container-title":["2020 IEEE Intelligent Vehicles Symposium (IV)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9304518\/9304528\/09304668.pdf?arnumber=9304668","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,28]],"date-time":"2022-06-28T21:53:48Z","timestamp":1656453228000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9304668\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,10,19]]},"references-count":25,"URL":"https:\/\/doi.org\/10.1109\/iv47402.2020.9304668","relation":{},"subject":[],"published":{"date-parts":[[2020,10,19]]}}}