{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,21]],"date-time":"2025-08-21T17:48:19Z","timestamp":1755798499088},"reference-count":22,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,8,5]],"date-time":"2024-08-05T00:00:00Z","timestamp":1722816000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,8,5]],"date-time":"2024-08-05T00:00:00Z","timestamp":1722816000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,8,5]]},"DOI":"10.1109\/cog60054.2024.10645657","type":"proceedings-article","created":{"date-parts":[[2024,8,28]],"date-time":"2024-08-28T18:44:09Z","timestamp":1724870649000},"page":"1-4","source":"Crossref","is-referenced-by-count":1,"title":["A Benchmark Environment for Offline Reinforcement Learning in Racing Games"],"prefix":"10.1109","author":[{"given":"Girolamo","family":"Macaluso","sequence":"first","affiliation":[{"name":"University of Florence"}]},{"given":"Alessandro","family":"Sestini","sequence":"additional","affiliation":[{"name":"SEED - Electronic Arts (EA)"}]},{"given":"Andrew D.","family":"Bagdanov","sequence":"additional","affiliation":[{"name":"University of Florence"}]}],"member":"263","reference":[{"doi-asserted-by":"publisher","key":"ref1","DOI":"10.13140\/RG.2.2.18893.74727"},{"key":"ref2","doi-asserted-by":"crossref","DOI":"10.1109\/LRA.2021.3064284","article-title":"Superhuman performance in gran turismo sport using deep reinforcement learning","author":"Fuchs","year":"2021","journal-title":"IEEE Robotics and Automation Letters"},{"doi-asserted-by":"publisher","key":"ref3","DOI":"10.1109\/TG.2022.3226910"},{"doi-asserted-by":"publisher","key":"ref4","DOI":"10.1109\/CoG47356.2020.9231552"},{"key":"ref5","article-title":"Offline reinforcement learning: Tutorial, review, and perspectives on open problems","author":"Levine","year":"2020","journal-title":"arXiv preprint arXiv:2005.01643"},{"year":"2020","article-title":"Trackmania","key":"ref6"},{"key":"ref7","article-title":"Unity: A general platform for intelligent agents","author":"Juliani","year":"2018","journal-title":"arXiv preprint arXiv:1809.02627"},{"key":"ref8","article-title":"Offline reinforcement learning with implicit q-learning","author":"Kostrikov","year":"2021","journal-title":"arXiv preprint arXiv:2110.06169"},{"key":"ref9","article-title":"A minimalist approach to offline reinforcement learning","author":"Fujimoto","year":"2021","journal-title":"Advances in neural information processing systems"},{"key":"ref10","article-title":"Conservative q-learning for offline reinforcement learning","author":"Kumar","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"doi-asserted-by":"publisher","key":"ref11","DOI":"10.1109\/CoG57401.2023.10333201"},{"key":"ref12","article-title":"D4rl: Datasets for deep data-driven reinforcement learning","author":"Fu","year":"2020","journal-title":"arXiv preprint arXiv:2004.07219"},{"doi-asserted-by":"publisher","key":"ref13","DOI":"10.1109\/CoG57401.2023.10333234"},{"key":"ref14","article-title":"Awac: Accelerating online reinforcement learning with offline datasets","author":"Nair","year":"2020","journal-title":"arXiv preprint arXiv:2006.09359"},{"key":"ref15","first-page":"1702","article-title":"Offline-to-online 
reinforcement learning via balanced replay and pessimistic q-ensemble","volume-title":"Conference on Robot Learning","author":"Lee"},{"volume-title":"Gymnasium","year":"2023","author":"Towers","key":"ref16"},{"key":"ref17","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv preprint arXiv:1707.06347"},{"volume-title":"International conference on machine learning","author":"Haarnoja","article-title":"Soft actor-critic: Offpolicy maximum entropy deep reinforcement learning with a stochastic actor","key":"ref18"},{"volume-title":"International conference on machine learning","author":"Fujimoto","article-title":"Addressing function approximation error in actor-critic methods","key":"ref19"},{"volume-title":"International Conference on Machine Learning","author":"Uchendu","article-title":"Jump-start reinforcement learning","key":"ref20"},{"key":"ref21","article-title":"Policy expansion for bridging offline-toonline reinforcement learning","author":"Zhang","year":"2023","journal-title":"arXiv preprint arXiv:2302.00935"},{"key":"ref22","article-title":"Small dataset, big gains Enhancing reinforcement learning by offline pre-training with modelbased augmentation","author":"Macaluso","year":"2024","journal-title":"Computer Sciences & Mathematics Forum"}],"event":{"name":"2024 IEEE Conference on Games (CoG)","start":{"date-parts":[[2024,8,5]]},"location":"Milan, Italy","end":{"date-parts":[[2024,8,8]]}},"container-title":["2024 IEEE Conference on Games (CoG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10645493\/10645533\/10645657.pdf?arnumber=10645657","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,2]],"date-time":"2024-09-02T04:30:47Z","timestamp":1725251447000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10645657\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,8,5]]},"references-count":22,"URL":"https:\/\/doi.org\/10.1109\/cog60054.2024.10645657","relation":{},"subject":[],"published":{"date-parts":[[2024,8,5]]}}}
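The record above follows the Crossref REST API work schema ("source":"Crossref"). As a minimal illustrative sketch, not part of the original record, the Python snippet below shows how such a record could be retrieved from the public https://api.crossref.org/works/{DOI} endpoint and how the fields present here (title, authors, venue, reference list) can be read back out; the function name fetch_crossref_work and the use of the requests library are assumptions for the example.

import requests

DOI = "10.1109/cog60054.2024.10645657"

def fetch_crossref_work(doi: str) -> dict:
    # Query the public Crossref REST API for a work record and
    # return its "message" payload (the object shown above).
    response = requests.get(f"https://api.crossref.org/works/{doi}", timeout=10)
    response.raise_for_status()
    return response.json()["message"]

work = fetch_crossref_work(DOI)
print(work["title"][0])                                # paper title
print(", ".join(a["family"] for a in work["author"]))  # Macaluso, Sestini, Bagdanov
print(work["container-title"][0])                      # 2024 IEEE Conference on Games (CoG)
# Each reference entry carries either a resolvable DOI or title fields.
for ref in work.get("reference", []):
    print(ref.get("DOI") or ref.get("article-title") or ref.get("volume-title"))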