{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T15:54:13Z","timestamp":1776182053393,"version":"3.50.1"},"reference-count":42,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"8","license":[{"start":{"date-parts":[[2022,8,1]],"date-time":"2022-08-01T00:00:00Z","timestamp":1659312000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,8,1]],"date-time":"2022-08-01T00:00:00Z","timestamp":1659312000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,8,1]],"date-time":"2022-08-01T00:00:00Z","timestamp":1659312000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Intell. Transport. Syst."],"published-print":{"date-parts":[[2022,8]]},"DOI":"10.1109\/tits.2021.3115235","type":"journal-article","created":{"date-parts":[[2021,11,2]],"date-time":"2021-11-02T21:30:03Z","timestamp":1635888603000},"page":"12562-12571","source":"Crossref","is-referenced-by-count":21,"title":["Policy-Based Reinforcement Learning for Training Autonomous Driving Agents in Urban Areas With Affordance Learning"],"prefix":"10.1109","volume":"23","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6927-9543","authenticated-orcid":false,"given":"Marwa","family":"Ahmed","sequence":"first","affiliation":[{"name":"Institute for Intelligent Systems Research and Innovation (IISRI), Deakin University, Waurn Ponds, VIC, Australia"}]},{"given":"Ahmed","family":"Abobakr","sequence":"additional","affiliation":[{"name":"Faculty of Computers and Artificial Intelligence, Cairo University, Giza, Egypt"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4191-9083","authenticated-orcid":false,"given":"Chee Peng","family":"Lim","sequence":"additional","affiliation":[{"name":"Institute for Intelligent Systems Research and Innovation (IISRI), Deakin University, Waurn Ponds, VIC, Australia"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0360-5270","authenticated-orcid":false,"given":"Saeid","family":"Nahavandi","sequence":"additional","affiliation":[{"name":"Institute for Intelligent Systems Research and Innovation (IISRI), Deakin University, Waurn Ponds, VIC, Australia"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1098\/rsta.2010.0110"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/b978-0-12-379777-3.x5000-6"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/MITS.2014.2306552"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8794025"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2017.8317735"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/j.robot.2008.10.024"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8460487"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/JAS.2020.1003033"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/j.trc.2015.09.011"},{"key":"ref10","volume-title":"Vehicle Dynamics and Control","author":"Rajamani","year":"2011"},{"key":"ref11","article-title":"Continuous control with deep reinforcement learning","author":"Lillicrap","year":"2015","journal-title":"arXiv:1509.02971"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.6144"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/IVS.2018.8500556"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.3390\/electronics8050543"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989381"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2015.7317916"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ISCID.2016.2054"},{"key":"ref18","article-title":"End-to-end model-free reinforcement learning for urban driving using implicit affordances","author":"Toromanoff","year":"2019","journal-title":"arXiv:1911.10868"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11796"},{"key":"ref20","article-title":"CARLA: An open urban driving simulator","author":"Dosovitskiy","year":"2017","journal-title":"arXiv:1711.03938"},{"key":"ref21","article-title":"End-to-end deep reinforcement learning for lane keeping assist","author":"Sallab","year":"2016","journal-title":"arXiv:1612.04340"},{"key":"ref22","volume-title":"Torcs: The Open Racing Car Simulator","author":"Wymann","year":"2000"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/IVS.2018.8500718"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1561\/0600000079"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/JAS.2018.7511186"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.378"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP.2017.8296962"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.232"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.690"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.312"},{"key":"ref31","article-title":"DenseNet: Implementing efficient ConvNet descriptor pyramids","author":"Iandola","year":"2014","journal-title":"arXiv:1404.1869"},{"key":"ref32","article-title":"Conditional affordance learning for driving in urban environments","author":"Sauer","year":"2018","journal-title":"arXiv:1806.06498"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.4324\/9781410605337-29"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298878"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1561\/2200000071"},{"key":"ref38","article-title":"A formal basis for the heuristic determination of minimum cost paths","author":"Appi","year":"1966"},{"key":"ref39","article-title":"Rethinking self-driving: Multi-task knowledge for better generalization and accident explanation ability","author":"Li","year":"2018","journal-title":"arXiv:1809.11100"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00942"},{"key":"ref41","first-page":"66","article-title":"Learning by cheating","volume-title":"Proc. Conf. Robot Learn.","author":"Chen"},{"key":"ref42","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2014","journal-title":"arXiv:1412.6980"}],"container-title":["IEEE Transactions on Intelligent Transportation Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6979\/9853713\/09599578.pdf?arnumber=9599578","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,12]],"date-time":"2024-01-12T02:08:02Z","timestamp":1705025282000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9599578\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,8]]},"references-count":42,"journal-issue":{"issue":"8"},"URL":"https:\/\/doi.org\/10.1109\/tits.2021.3115235","relation":{},"ISSN":["1524-9050","1558-0016"],"issn-type":[{"value":"1524-9050","type":"print"},{"value":"1558-0016","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,8]]}}}