{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T17:05:31Z","timestamp":1774717531628,"version":"3.50.1"},"reference-count":37,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,5,29]],"date-time":"2023-05-29T00:00:00Z","timestamp":1685318400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,5,29]],"date-time":"2023-05-29T00:00:00Z","timestamp":1685318400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,5,29]]},"DOI":"10.1109\/icra48891.2023.10161492","type":"proceedings-article","created":{"date-parts":[[2023,7,4]],"date-time":"2023-07-04T17:20:56Z","timestamp":1688491256000},"page":"879-885","source":"Crossref","is-referenced-by-count":11,"title":["Handling Sparse Rewards in Reinforcement Learning Using Model Predictive Control"],"prefix":"10.1109","author":[{"given":"Murad","family":"Dawood","sequence":"first","affiliation":[{"name":"University of Bonn,Humanoid Robots Lab.,Germany"}]},{"given":"Nils","family":"Dengler","sequence":"additional","affiliation":[{"name":"University of Bonn,Humanoid Robots Lab.,Germany"}]},{"given":"Jorge","family":"de Heuvel","sequence":"additional","affiliation":[{"name":"University of Bonn,Humanoid Robots Lab.,Germany"}]},{"given":"Maren","family":"Bennewitz","sequence":"additional","affiliation":[{"name":"University of Bonn,Humanoid Robots Lab.,Germany"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Hindsight experience replay","author":"Andrychowicz","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref2","article-title":"Leveraging demonstrations for deep reinforcement learning on robotics problems with sparse rewards","author":"Vecerik","year":"2017","journal-title":"arXiv preprint"},
{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2020.2973193"},{"key":"ref4","article-title":"Goal-constrained sparse reinforcement learning for end-to-end driving","author":"Agarwal","year":"2021","journal-title":"arXiv preprint"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2018.2869644"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1049\/joe.2018.8314"},{"key":"ref7","article-title":"Integrating behavior cloning and reinforcement learning for improved performance in dense and sparse reward environments","author":"Goecks","year":"2019","journal-title":"arXiv preprint"},{"key":"ref8","article-title":"Learning from demonstrations for real world reinforcement learning","author":"Hester","year":"2017","journal-title":"arXiv preprint"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA40945.2020.9196754"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8463162"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/RO-MAN53752.2022.9900554"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.3390\/s18072321"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICARCV50220.2020.9305513"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/j.ifacol.2020.12.2658"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ITCE48509.2020.9047783"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.ifacol.2018.11.038"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1016\/j.ifacol.2020.12.1376"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICAR.2013.6766536"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2016.7487175"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11757"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/IV51971.2022.9827073"},
{"key":"ref22","article-title":"Reinforcement learning with sparse rewards using guidance from offline demonstration","author":"Rengarajan","year":"2022","journal-title":"arXiv preprint"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8461203"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2022.3201705"},{"key":"ref26","first-page":"1","article-title":"Guided policy search","volume-title":"International conference on machine learning","author":"Levine"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/IROS45743.2020.9341021"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3191234"},{"key":"ref29","article-title":"Residual policy learning","author":"Silver","year":"2018","journal-title":"arXiv preprint"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1016\/j.ifacol.2020.12.2426"},{"key":"ref31","article-title":"Safe end-to-end imitation learning for model predictive control","author":"Lee","year":"2018","journal-title":"arXiv preprint"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3068662"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/s12532-021-00208-8"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1812.05905"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2004.1389727"},{"key":"ref36","first-page":"5","article-title":"ROS: an open-source robot operating system","volume-title":"ICRA workshop on open source software","volume":"3","author":"Quigley"},
{"key":"ref37","article-title":"Addressing function approximation error in actor-critic methods","volume-title":"Proc. of the Intl. Conf. on Machine Learning (ICML)","author":"Fujimoto"}],"event":{"name":"2023 IEEE International Conference on Robotics and Automation (ICRA)","location":"London, United Kingdom","start":{"date-parts":[[2023,5,29]]},"end":{"date-parts":[[2023,6,2]]}},"container-title":["2023 IEEE International Conference on Robotics and Automation (ICRA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10160211\/10160212\/10161492.pdf?arnumber=10161492","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,1]],"date-time":"2024-03-01T11:47:08Z","timestamp":1709293628000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10161492\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,5,29]]},"references-count":37,"URL":"https:\/\/doi.org\/10.1109\/icra48891.2023.10161492","relation":{},"subject":[],"published":{"date-parts":[[2023,5,29]]}}}