{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,21]],"date-time":"2026-02-21T18:18:32Z","timestamp":1771697912563,"version":"3.50.1"},"reference-count":28,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2019,9,1]],"date-time":"2019-09-01T00:00:00Z","timestamp":1567296000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,9,1]],"date-time":"2019-09-01T00:00:00Z","timestamp":1567296000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,9,1]],"date-time":"2019-09-01T00:00:00Z","timestamp":1567296000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Ministry of Land Infrastructure and Transport of Korean government","award":["18TLRPB101406-04"],"award-info":[{"award-number":["18TLRPB101406-04"]}]},{"name":"Ministry of Trade Industry and Energy MOTIE Korea","award":["10079730"],"award-info":[{"award-number":["10079730"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Intell. Veh."],"published-print":{"date-parts":[[2019,9]]},"DOI":"10.1109\/tiv.2019.2919467","type":"journal-article","created":{"date-parts":[[2019,5,28]],"date-time":"2019-05-28T19:48:05Z","timestamp":1559072885000},"page":"416-424","source":"Crossref","is-referenced-by-count":68,"title":["Deep Distributional Reinforcement Learning Based High-Level Driving Policy Determination"],"prefix":"10.1109","volume":"4","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-8506-1077","authenticated-orcid":false,"given":"Kyushik","family":"Min","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0290-5121","authenticated-orcid":false,"given":"Hayoung","family":"Kim","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7179-7841","authenticated-orcid":false,"given":"Kunsoo","family":"Huh","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","article-title":"Distributional reinforcement learning with quantile regression","author":"dabney","year":"2017"},{"key":"ref11","article-title":"Distributional reinforcement learning with quantile regression","author":"dabney","year":"0","journal-title":"Proc 30nd AAAI Conf Artif Intell"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.23919\/ACC.2017.7963716"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/IVS.2017.7995709"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/IVS.2017.7995703"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.2352\/ISSN.2470-1173.2017.19.AVM-023"},{"key":"ref17","article-title":"Combining deep reinforcement learning and safety based control for autonomous driving","author":"xiong","year":"2016"},{"key":"ref18","article-title":"Safe, multi-agent, reinforcement learning for autonomous driving","author":"shalev-shwartz","year":"2016"},{"key":"ref19","article-title":"Reinforcement learning for robots using neural networks","author":"lin","year":"1993"},{"key":"ref28","article-title":"Unity: A general platform for intelligent agents","author":"juliani","year":"2018"},{"key":"ref4","article-title":"DeepMind reduces Google data centre 
cooling bill by 40%","year":"2016"},{"key":"ref27","first-page":"249","article-title":"Understanding the difficulty of training deep feedforward neural networks","author":"glorot","year":"0","journal-title":"Proc 13th Int Conf Artif Intell Statist"},{"key":"ref3","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref6","first-page":"9191","article-title":"Visual reinforcement learning with imagined goals","author":"nair","year":"2018","journal-title":"Proc Adv Neural Inform Process Syst"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2016.7487173"},{"key":"ref8","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017"},{"key":"ref7","first-page":"1889","article-title":"Trust region policy optimization","author":"schulman","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref2","doi-asserted-by":"crossref","first-page":"354","DOI":"10.1038\/nature24270","article-title":"Mastering the game of go without human knowledge","volume":"550","author":"silver","year":"2017","journal-title":"Nature"},{"key":"ref9","first-page":"449","article-title":"A distributional perspective on reinforcement learning","author":"bellemare","year":"0","journal-title":"Proc 34th Int Conf Mach Learn -Volume"},{"key":"ref1","doi-asserted-by":"crossref","first-page":"484","DOI":"10.1038\/nature16961","article-title":"Mastering the game of go with deep neural networks and tree search","volume":"529","author":"silver","year":"2016","journal-title":"Nature"},{"key":"ref20","first-page":"2094","article-title":"Deep reinforcement learning with double q-learning","volume":"2","author":"van hasselt","year":"0","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"ref22","first-page":"1995","article-title":"Dueling network architectures for deep reinforcement learning","volume":"48","author":"wang","year":"0","journal-title":"Proc 33rd Int Conf Mach Learn"},{"key":"ref21","first-page":"2613","article-title":"Double q-learning","author":"hasselt","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1257\/jep.15.4.143"},{"key":"ref26","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2015","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref25","first-page":"265","article-title":"Tensorflow: A system for large-scale machine learning.","volume":"16","author":"abadi","year":"0","journal-title":"Proc 10th USENIX Conf Oper Syst Des Implementation"}],"container-title":["IEEE Transactions on Intelligent 
Vehicles"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7274857\/8813110\/08723635.pdf?arnumber=8723635","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,13]],"date-time":"2022-07-13T21:13:47Z","timestamp":1657746827000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8723635\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,9]]},"references-count":28,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/tiv.2019.2919467","relation":{},"ISSN":["2379-8904","2379-8858"],"issn-type":[{"value":"2379-8904","type":"electronic"},{"value":"2379-8858","type":"print"}],"subject":[],"published":{"date-parts":[[2019,9]]}}}