{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T16:20:02Z","timestamp":1776183602674,"version":"3.50.1"},"reference-count":51,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,9,24]],"date-time":"2024-09-24T00:00:00Z","timestamp":1727136000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,9,24]],"date-time":"2024-09-24T00:00:00Z","timestamp":1727136000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,9,24]]},"DOI":"10.1109\/itsc58415.2024.10919498","type":"proceedings-article","created":{"date-parts":[[2025,3,21]],"date-time":"2025-03-21T19:00:11Z","timestamp":1742583611000},"page":"2651-2658","source":"Crossref","is-referenced-by-count":9,"title":["GOOSE: Goal-Conditioned Reinforcement Learning for Safety-Critical Scenario Generation"],"prefix":"10.1109","author":[{"given":"Joshua","family":"Ransiek","sequence":"first","affiliation":[{"name":"FZI Research Center for Information Technology,Karlsruhe,Germany"}]},{"given":"Johannes","family":"Plaum","sequence":"additional","affiliation":[{"name":"Torc Europe GmbH,Stuttgart,Germany"}]},{"given":"Jacob","family":"Langner","sequence":"additional","affiliation":[{"name":"FZI Research Center for Information Technology,Karlsruhe,Germany"}]},{"given":"Eric","family":"Sax","sequence":"additional","affiliation":[{"name":"KIT Karlsruhe Institute of Technology,Karlsruhe,Germany"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2020.2993730"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/iv55152.2023.10186735"},{"key":"ref3","volume-title":"International Organization for Standardization, Road vehicles - Safety of the 
intended functionality. ISO 21448","year":"2022"},{"key":"ref4","first-page":"1506","article-title":"Play to grade: testing coding games as classifying markov decision process","volume":"34","author":"Nie","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ASE.2019.00077"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CoG52621.2021.9619048"},{"key":"ref7","article-title":"Taking the scenic route: Automatic exploration for videogames","author":"Zhan","year":"2018","journal-title":"arXiv preprint"},{"key":"ref8","article-title":"Winning isn\u2019t everything: Training agents to play test modern games","volume-title":"AAAI Workshop on Reinforcement Learning in Games","author":"Borovikov","year":"2019"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ACSOS49614.2020.00038"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICST.2018.00020"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/3395363.3397354"},{"key":"ref12","article-title":"Drift: Deep reinforcement learning for functional software testing","author":"Harries","year":"2020","journal-title":"arXiv preprint"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/SEAA.2019.00032"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICST.2019.00023"},{"key":"ref15","article-title":"Hindsight experience replay","volume":"30","author":"Andrychowicz","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref16","first-page":"1094","article-title":"Learning to achieve goals","volume":"2","author":"Kaelbling","year":"1993","journal-title":"IJCAI"},{"key":"ref17","first-page":"1312","article-title":"Universal value function approximators","volume-title":"International conference on machine learning","author":"Schaul","year":"2015"},{"key":"ref18","first-page":"1430","article-title":"Goal-conditioned reinforcement learning 
with imagined subgoals","volume-title":"International Conference on Machine Learning","author":"Chane-Sane","year":"2021"},{"key":"ref19","article-title":"Planning with goal-conditioned policies","volume":"32","author":"Nasiriany","year":"2019","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01679"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01026"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00095"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00978"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC55140.2022.9922440"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/IVS.2018.8500400"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2019.8917242"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2019.8917403"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC45102.2020.9294729"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/IROS51168.2021.9636072"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC45102.2020.9294590"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA40945.2020.9197351"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1177\/03611981211018697"},{"key":"ref33","article-title":"Scalable end-to-end autonomous vehicle testing via rare-event simulation","volume":"31","author":"O\u2019Kelly","year":"2018","journal-title":"Advances in neural information processing systems"},{"issue":"1","key":"ref34","doi-asserted-by":"crossref","first-page":"748","DOI":"10.1038\/s41467-021-21007-8","article-title":"Intelligent driving intelligence test for autonomous vehicles with naturalistic and adversarial environment","volume":"12","author":"Feng","year":"2021","journal-title":"Nature communications"},{"key":"ref35","article-title":"(Re)2H20: 
Autonomous driving scenario generation via reversely regularized hybrid offline-and-online reinforcement learning","author":"Niu","year":"2023","journal-title":"arXiv preprint arXiv:2302.13726"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICSE48619.2023.00155"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/IROS45743.2020.9340696"},{"key":"ref38","article-title":"Failure-scenario maker for rule-based agent using multi-agent adversarial reinforcement learning and its application to autonomous driving","author":"Wachi","year":"2019","journal-title":"arXiv preprint"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC57777.2023.10422130"},{"key":"ref40","volume-title":"The NURBS book","author":"Piegl","year":"2012"},{"key":"ref41","author":"Stoker","year":"1969","journal-title":"Differential geometry wiley"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/IVS.2018.8500406"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3053159"},{"key":"ref44","volume-title":"ASAM OpenSCENARIO DSL","year":"2024"},{"issue":"157","key":"ref45","article-title":"Proposal for the 01 series of amendments to UN Regulation No. 
157 (Automated Lane Keeping Systems)","volume-title":"Addendum 156 to the 1958 Agreement - UN Regulation","year":"2022"},{"key":"ref46","article-title":"Dropout q-functions for doubly efficient reinforcement learning","author":"Hiraoka","year":"2021","journal-title":"arXiv preprint"},{"key":"ref47","article-title":"A walk in the park: Learning to walk in 20 minutes with model-free reinforcement learning","author":"Smith","year":"2022","journal-title":"arXiv preprint"},{"key":"ref48","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"International conference on machine learning","author":"Haarnoja","year":"2018"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC57777.2023.10422528"},{"key":"ref50","article-title":"Argoverse 2: Next generation datasets for self-driving perception and forecasting","author":"Wilson","year":"2023","journal-title":"arXiv preprint arXiv:2301.00493"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1103\/PhysRevE.62.1805"}],"event":{"name":"2024 IEEE 27th International Conference on Intelligent Transportation Systems (ITSC)","location":"Edmonton, AB, Canada","start":{"date-parts":[[2024,9,24]]},"end":{"date-parts":[[2024,9,27]]}},"container-title":["2024 IEEE 27th International Conference on Intelligent Transportation Systems 
(ITSC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10919469\/10919190\/10919498.pdf?arnumber=10919498","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,3,21]],"date-time":"2025-03-21T21:39:08Z","timestamp":1742593148000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10919498\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,9,24]]},"references-count":51,"URL":"https:\/\/doi.org\/10.1109\/itsc58415.2024.10919498","relation":{},"subject":[],"published":{"date-parts":[[2024,9,24]]}}}