{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,27]],"date-time":"2026-01-27T13:37:26Z","timestamp":1769521046761,"version":"3.49.0"},"reference-count":20,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,12,1]],"date-time":"2020-12-01T00:00:00Z","timestamp":1606780800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,12,1]],"date-time":"2020-12-01T00:00:00Z","timestamp":1606780800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,12,1]],"date-time":"2020-12-01T00:00:00Z","timestamp":1606780800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,12,1]]},"DOI":"10.1109\/ssci47803.2020.9308525","type":"proceedings-article","created":{"date-parts":[[2021,1,5]],"date-time":"2021-01-05T18:12:38Z","timestamp":1609870358000},"page":"745-752","source":"Crossref","is-referenced-by-count":5,"title":["Optimizing Agent Training with Deep Q-Learning on a Self-Driving Reinforcement Learning Environment"],"prefix":"10.1109","author":[{"given":"Pedro","family":"Rodrigues","sequence":"first","affiliation":[]},{"given":"Susana","family":"Vieira","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","article-title":"Deep reinforcement learning with double q-learning","author":"hasselt hado","year":"2016","journal-title":"THIRTIETH AAAI Conference on Artificial Intelligence"},{"key":"ref11","article-title":"Prioritized experience replay","author":"schaul","year":"2015","journal-title":"arXiv preprint arXiv 1511 05952"},{"key":"ref12","doi-asserted-by":"crossref","DOI":"10.1609\/aaai.v32i1.11796","article-title":"Rainbow: Combining improvements in deep reinforcement learning","author":"hessel","year":"2018","journal-title":"Thirty-Second AAAI Conference on Artificial Intelligence"},{"key":"ref13","doi-asserted-by":"crossref","first-page":"279","DOI":"10.1007\/BF00992698","article-title":"Q-learning","volume":"8","author":"watkins","year":"1992","journal-title":"Machine Learning"},{"key":"ref14","author":"aalto","year":"0","journal-title":"DQN reinforcement learning agent applied to 2d car racing environment"},{"key":"ref15","article-title":"Reinforcement learning for robots using neural networks. No. CMU-CS-93-103","author":"lin","year":"1993","journal-title":"Carnegie-mellon Univ Pittsburgh Pa School of Computer Science"},{"key":"ref16","article-title":"Imagenet classification with deep convolutional neural networks","author":"krizhevsky","year":"2012","journal-title":"Advances in neural information processing systems"},{"key":"ref17","article-title":"L2 regularization versus batch and weight normalization","author":"laarhoven twan","year":"2017","journal-title":"arXiv preprint arXiv 1706 05350"},{"key":"ref18","article-title":"Weight agnostic neural networks","author":"gaier","year":"2019","journal-title":"Advances in neural information processing systems"},{"key":"ref19","author":"khan","year":"2016","journal-title":"Car racing using reinforcement learning"},{"key":"ref4","author":"stekolshchik","year":"2019","journal-title":"CarRacing with PPO"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/3321707.3321817"},{"key":"ref6","article-title":"Dueling network architectures for deep reinforcement learning","author":"wang","year":"2016","journal-title":"International Conference on Machine Learning"},{"key":"ref5","author":"prieur","year":"2017","journal-title":"Deep-Q learning using simple feedforward neural network"},{"key":"ref8","article-title":"Playing atari with deep reinforcement learning","author":"mnih","year":"2013","journal-title":"arXiv 1312 5602"},{"key":"ref7","article-title":"Exploration of reinforcement learning in computer games","author":"guan","year":"2018"},{"key":"ref2","first-page":"2450","article-title":"Recurrent world models facilitate policy evolution","author":"ha","year":"2018","journal-title":"Advances in neural information processing systems"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref9","article-title":"Deep recurrent q-learning for partially observable mdps","author":"hausknecht","year":"2015","journal-title":"2015 AAAI Fall Symposium Series"},{"key":"ref20","author":"jang","year":"2017","journal-title":"Reinforcement car racing with A3C"}],"event":{"name":"2020 IEEE Symposium Series on Computational Intelligence (SSCI)","location":"Canberra, ACT, Australia","start":{"date-parts":[[2020,12,1]]},"end":{"date-parts":[[2020,12,4]]}},"container-title":["2020 IEEE Symposium Series on Computational Intelligence (SSCI)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9308061\/9308107\/09308525.pdf?arnumber=9308525","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,12,10]],"date-time":"2022-12-10T12:32:05Z","timestamp":1670675525000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9308525\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,12,1]]},"references-count":20,"URL":"https:\/\/doi.org\/10.1109\/ssci47803.2020.9308525","relation":{},"subject":[],"published":{"date-parts":[[2020,12,1]]}}}