{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,10]],"date-time":"2026-03-10T15:15:30Z","timestamp":1773155730045,"version":"3.50.1"},"reference-count":32,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,6,8]],"date-time":"2022-06-08T00:00:00Z","timestamp":1654646400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,6,8]],"date-time":"2022-06-08T00:00:00Z","timestamp":1654646400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,6,8]]},"DOI":"10.23919\/acc53348.2022.9867557","type":"proceedings-article","created":{"date-parts":[[2022,9,5]],"date-time":"2022-09-05T20:24:10Z","timestamp":1662409450000},"page":"2409-2415","source":"Crossref","is-referenced-by-count":12,"title":["Driver Assistance Eco-driving and Transmission Control with Deep Reinforcement Learning"],"prefix":"10.23919","author":[{"given":"Lindsey","family":"Kerbel","sequence":"first","affiliation":[{"name":"Clemson University,Department of Automotive Engineering,Greenville,SC,USA,29607"}]},{"given":"Beshah","family":"Ayalew","sequence":"additional","affiliation":[{"name":"Clemson University,Department of Automotive Engineering,Greenville,SC,USA,29607"}]},{"given":"Andrej","family":"Ivanco","sequence":"additional","affiliation":[{"name":"Allison Transmission Inc.,Indianapolis,IN,USA,46222"}]},{"given":"Keith","family":"Loiselle","sequence":"additional","affiliation":[{"name":"Allison Transmission Inc.,Indianapolis,IN,USA,46222"}]}],"member":"263","reference":[{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.4209\/aaqr.2017.02.0080"},{"key":"ref31","article-title":"Vehicle testing regulations","year":"0"},{"key":"ref30","article-title":"Initializing neural networks","author":"katanforoosh","year":"2019"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1049\/iet-its.2020.0380"},{"key":"ref11","article-title":"The method of mass estimation considering system error in vehicle longitudinal dynamics","volume":"12","author":"nan","year":"2018","journal-title":"Energies"},{"key":"ref12","article-title":"Reinforcement learning: An Introduction","author":"barto","year":"2018"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/IVS.2018.8500556"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.3390\/sym11091139"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2011.2157145"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2019.8916781"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1016\/j.arcontrol.2010.02.002"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.23919\/ACC.2018.8430948"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2019.2947756"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.3182\/20140824-6-ZA-1003.02042"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/j.trd.2009.05.009"},{"key":"ref27","article-title":"Reinforcement learning and the reward engineering principle","author":"dewey","year":"2014","journal-title":"AAAI spring symposia"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.enpol.2009.10.021"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2011.2142182"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1103\/PhysRevE.62.1805"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1016\/j.trpro.2014.10.092"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/SMILE45626.2019.8965298"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CCA.2016.7587984"},{"key":"ref2","article-title":"Sources of greenhouse gas emissions","year":"2015"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.3390\/app10155271"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1038\/nature18307"},{"key":"ref20","article-title":"Continuous-discrete reinforcement learning for hybrid control in robotics","author":"neunert","year":"2020","journal-title":"CoRR"},{"key":"ref22","first-page":"1054","article-title":"Safe and efficient off-policy reinforcement learning","author":"munos","year":"2016","journal-title":"Proceedings of the 30th International Conference on Neural Information Processing Systems"},{"key":"ref21","article-title":"Maximum a posteriori policy optimisation","author":"abdolmaleki","year":"2018","journal-title":"CoRR"},{"key":"ref24","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017","journal-title":"CoRR"},{"key":"ref23","article-title":"Trust region policy optimization","author":"schulman","year":"2015","journal-title":"CoRR"},{"key":"ref26","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2015","journal-title":"International Conference on Learning Representations"},{"key":"ref25","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2016"}],"event":{"name":"2022 American Control Conference (ACC)","location":"Atlanta, GA, USA","start":{"date-parts":[[2022,6,8]]},"end":{"date-parts":[[2022,6,10]]}},"container-title":["2022 American Control Conference (ACC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9866948\/9867142\/09867557.pdf?arnumber=9867557","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,10,10]],"date-time":"2022-10-10T20:24:25Z","timestamp":1665433465000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9867557\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,6,8]]},"references-count":32,"URL":"https:\/\/doi.org\/10.23919\/acc53348.2022.9867557","relation":{},"subject":[],"published":{"date-parts":[[2022,6,8]]}}}