{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,4]],"date-time":"2025-09-04T14:05:22Z","timestamp":1756994722082,"version":"3.40.2"},"reference-count":41,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,9,24]],"date-time":"2024-09-24T00:00:00Z","timestamp":1727136000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,9,24]],"date-time":"2024-09-24T00:00:00Z","timestamp":1727136000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,9,24]]},"DOI":"10.1109\/itsc58415.2024.10920216","type":"proceedings-article","created":{"date-parts":[[2025,3,21]],"date-time":"2025-03-21T19:00:11Z","timestamp":1742583611000},"page":"3922-3929","source":"Crossref","is-referenced-by-count":1,"title":["F1tenth Autonomous Racing With Offline Reinforcement Learning Methods"],"prefix":"10.1109","author":[{"given":"Prajwal","family":"Koirala","sequence":"first","affiliation":[{"name":"Iowa State University,Ames,Iowa"}]},{"given":"Cody","family":"Fleming","sequence":"additional","affiliation":[{"name":"Iowa State University,Ames,Iowa"}]}],"member":"263","reference":[{"key":"ref1","article-title":"An empirical evaluation of deep learning on highway driving","author":"Huval","year":"2015","journal-title":"arXiv preprint"},{"key":"ref2","article-title":"A review of tracking, prediction and decision making methods for autonomous driving","author":"Leon","year":"2019","journal-title":"arXiv preprint"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TIV.2024.3351131"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CCNC49033.2022.9700730"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/WACV45572.2020.9093332"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/OJITS.2022.3181510"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC45102.2020.9294553"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-27645-3_2"},{"key":"ref9","first-page":"20132","article-title":"A minimalist approach to offline reinforcement learning","volume":"34","author":"Fujimoto","year":"2021","journal-title":"Advances in neural information processing systems"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3250269"},{"key":"ref11","article-title":"Model-based deep reinforcement learning for autonomous racing","author":"Brunnbauer","year":"2021","journal-title":"Ph.D. dissertation, Wien"},{"key":"ref12","article-title":"Offline reinforcement learning: Tutorial, review, and perspectives on open problems","author":"Levine","year":"2020","journal-title":"arXiv preprint"},{"key":"ref13","article-title":"Corl: Research-oriented deep offline reinforcement learning library","volume":"36","author":"Tarasov","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref14","article-title":"F1tenth: An open-source evaluation environment for continuous control and reinforcement learning","volume-title":"Proceedings of Machine Learning Research","volume":"123","author":"O\u2019Kelly"},{"volume-title":"Racecar gym","year":"2021","author":"Brunnbauer","key":"ref15"},{"journal-title":"Pybullet, a python module for physics simulation for games, robotics and machine learning","year":"2016","author":"Coumans","key":"ref16"},{"key":"ref17","article-title":"Solving offline reinforcement learning with decision tree regression","author":"Koirala","year":"2024","journal-title":"arXiv preprint"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/2939672.2939785"},{"key":"ref19","first-page":"15084","article-title":"Decision transformer: Reinforcement learning via sequence modeling","volume":"34","author":"Chen","year":"2021","journal-title":"Advances in neural information processing systems"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1177\/02783649241273668"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref22","first-page":"6840","article-title":"Denoising diffusion probabilistic models","volume":"33","author":"Ho","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref23","article-title":"Offline reinforcement learning with implicit q-learning","author":"Kostrikov","year":"2021","journal-title":"arXiv preprint"},{"key":"ref24","first-page":"1179","article-title":"Conservative q-learning for offline reinforcement learning","volume":"33","author":"Kumar","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref25","article-title":"Awac: Accelerating online reinforcement learning with offline datasets","author":"Nair","year":"2020","journal-title":"arXiv preprint"},{"key":"ref26","first-page":"1719","article-title":"Plas: Latent action space for offline reinforcement learning","volume-title":"Conference on Robot Learning. PMLR","author":"Zhou"},{"issue":"315","key":"ref27","first-page":"1","article-title":"d3rlpy: An offline deep reinforcement learning library","volume":"23","author":"Seno","year":"2022","journal-title":"Journal of Machine Learning Research"},{"key":"ref28","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"International conference on machine learning. PMLR","author":"Haarnoja"},{"key":"ref29","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv preprint"},{"journal-title":"Implementation of the pure pursuit path tracking algorithm","year":"1992","author":"Coulter","key":"ref30"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1002\/oca.2123"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2019.2926677"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ECC.2016.7810413"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA46639.2022.9811650"},{"key":"ref35","article-title":"Dream to control: Learning behaviors by latent imagination","author":"Hafner","year":"2019","journal-title":"arXiv preprint"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2023.3295252"},{"issue":"8","key":"ref38","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI blog"},{"key":"ref39","article-title":"Actor-critic algorithms","volume":"12","author":"Konda","year":"1999","journal-title":"Advances in neural information processing systems"},{"key":"ref40","article-title":"Advantage-weighted regression: Simple and scalable off-policy reinforcement learning","author":"Peng","year":"2019","journal-title":"arXiv preprint"},{"key":"ref41","first-page":"1587","article-title":"Addressing function approximation error in actor-critic methods","volume-title":"International conference on machine learning. PMLR","author":"Fujimoto"},{"issue":"268","key":"ref42","first-page":"1","article-title":"Stable-baselines3: Reliable reinforcement learning implementations","volume":"22","author":"Raffin","year":"2021","journal-title":"Journal of Machine Learning Research"}],"event":{"name":"2024 IEEE 27th International Conference on Intelligent Transportation Systems (ITSC)","start":{"date-parts":[[2024,9,24]]},"location":"Edmonton, AB, Canada","end":{"date-parts":[[2024,9,27]]}},"container-title":["2024 IEEE 27th International Conference on Intelligent Transportation Systems (ITSC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10919469\/10919190\/10920216.pdf?arnumber=10920216","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,3,21]],"date-time":"2025-03-21T21:10:05Z","timestamp":1742591405000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10920216\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,9,24]]},"references-count":41,"URL":"https:\/\/doi.org\/10.1109\/itsc58415.2024.10920216","relation":{},"subject":[],"published":{"date-parts":[[2024,9,24]]}}}