{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,21]],"date-time":"2026-01-21T17:11:28Z","timestamp":1769015488991,"version":"3.49.0"},"reference-count":25,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2018,6]]},"DOI":"10.1109\/ivs.2018.8500645","type":"proceedings-article","created":{"date-parts":[[2018,10,22]],"date-time":"2018-10-22T20:56:23Z","timestamp":1540241783000},"page":"226-231","source":"Crossref","is-referenced-by-count":34,"title":["Deep Q Learning Based High Level Driving Policy Determination"],"prefix":"10.1109","author":[{"given":"Kyushik","family":"Min","sequence":"first","affiliation":[]},{"given":"Hayoung","family":"Kim","sequence":"additional","affiliation":[]},{"given":"Kunsoo","family":"Huh","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/IVS.2017.7995703"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"ref12","article-title":"End-to-end deep reinforcement learning for lane keeping assist","author":"sallab","year":"2016","journal-title":"arXiv preprint arXiv 1612 04340"},{"key":"ref13","doi-asserted-by":"crossref","DOI":"10.4271\/2017-01-1960","article-title":"Obstacle avoidance for self-driving vehicle with reinforcement learning","volume":"11","author":"zong","year":"2017","journal-title":"SAE International Journal of Passenger Cars-Electronic and Electrical Systems"},{"key":"ref14","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015","journal-title":"arXiv preprint arXiv 1509 02971"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1512\/iumj.1957.6.56038"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2017.2743240"},{"key":"ref17","first-page":"2094","article-title":"Deep reinforcement learning with double q-learning","volume":"16","author":"van hasselt","year":"2016","journal-title":"AAAI"},{"key":"ref18","article-title":"Prioritized experience replay","author":"schaul","year":"2015","journal-title":"arXiv preprint arXiv 1511 05952"},{"key":"ref19","article-title":"Dueling network architectures for deep reinforcement learning","author":"wang","year":"2015","journal-title":"arXiv preprint arXiv 1511 06581"},{"key":"ref4","article-title":"Reduces google data centre cooling bill by 40%","author":"deepmind","year":"2016"},{"key":"ref3","doi-asserted-by":"crossref","first-page":"354","DOI":"10.1038\/nature24270","article-title":"Mastering the game of go without human knowledge","volume":"550","author":"silver","year":"2017","journal-title":"Nature"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref5","article-title":"Very deep convolutional networks for large-scale image recognition","author":"simonyan","year":"2014","journal-title":"arXiv preprint arXiv 1409 1556"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.23919\/ACC.2017.7963716"},{"key":"ref7","first-page":"91","article-title":"Faster r-cnn: Towards realtime object detection with region proposal networks","author":"ren","year":"2015","journal-title":"Advances in neural information processing systems"},{"key":"ref2","doi-asserted-by":"crossref","first-page":"484","DOI":"10.1038\/nature16961","article-title":"Mastering the game of go with deep neural networks and tree search","volume":"529","author":"silver","year":"2016","journal-title":"Nature"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/IVS.2017.7995709"},{"key":"ref1","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref20","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"2016","journal-title":"International Conference on Machine Learning"},{"key":"ref22","article-title":"A distributional perspective on reinforcement learning","author":"bellemare","year":"2017","journal-title":"arXiv preprint arXiv 1707 06887"},{"key":"ref21","article-title":"Noisy networks for exploration","author":"fortunato","year":"2017","journal-title":"arXiv preprint arXiv 1706 10295"},{"key":"ref24","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014","journal-title":"arXiv preprint arXiv 1412 6980"},{"key":"ref23","article-title":"Rainbow: Combining improvements in deep reinforcement learning","author":"hessel","year":"2017","journal-title":"arXiv preprint arXiv 1710 02298"},{"key":"ref25","first-page":"265","article-title":"Tensorflow: A system for large-scale machine learning","volume":"16","author":"abadi","year":"2016","journal-title":"OSDI"}],"event":{"name":"2018 IEEE Intelligent Vehicles Symposium (IV)","location":"Changshu","start":{"date-parts":[[2018,6,26]]},"end":{"date-parts":[[2018,6,30]]}},"container-title":["2018 IEEE Intelligent Vehicles Symposium (IV)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8472796\/8500355\/08500645.pdf?arnumber=8500645","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,1,26]],"date-time":"2022-01-26T14:38:11Z","timestamp":1643207891000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8500645\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2018,6]]},"references-count":25,"URL":"https:\/\/doi.org\/10.1109\/ivs.2018.8500645","relation":{},"subject":[],"published":{"date-parts":[[2018,6]]}}}