{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,18]],"date-time":"2025-12-18T14:21:20Z","timestamp":1766067680280,"version":"3.28.0"},"reference-count":42,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,12,27]],"date-time":"2022-12-27T00:00:00Z","timestamp":1672099200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,12,27]],"date-time":"2022-12-27T00:00:00Z","timestamp":1672099200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,12,27]]},"DOI":"10.1109\/iccspa55860.2022.10018994","type":"proceedings-article","created":{"date-parts":[[2023,1,24]],"date-time":"2023-01-24T19:14:50Z","timestamp":1674587690000},"page":"1-6","source":"Crossref","is-referenced-by-count":4,"title":["Safe Reinforcement Learning using Data-Driven Predictive Control"],"prefix":"10.1109","author":[{"given":"Mahmoud","family":"Selim","sequence":"first","affiliation":[{"name":"Ain Shams University,Cairo,Egypt"}]},{"given":"Amr","family":"Alanwar","sequence":"additional","affiliation":[{"name":"Jacobs University,Bremen,Germany"}]},{"given":"M. Watheq","family":"El-Kharashi","sequence":"additional","affiliation":[{"name":"Ain Shams University,Cairo,Egypt"}]},{"given":"Hazem M.","family":"Abbas","sequence":"additional","affiliation":[{"name":"Ain Shams University,Cairo,Egypt"}]},{"given":"Karl H.","family":"Johansson","sequence":"additional","affiliation":[{"name":"KTH Royal Institute of Technology,Stockholm,Sweden"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2011.2165689"},{"key":"ref2","volume-title":"Introduction to reinforcement learning","volume":"135","author":"Sutton","year":"1998"},{"issue":"2","key":"ref3","doi-asserted-by":"crossref","first-page":"267","DOI":"10.1023\/A:1017940631555","article-title":"Risk-sensitive reinforcement learning","volume":"49","author":"Mihatsch","year":"2002","journal-title":"Machine learning"},{"journal-title":"Safe exploration in markov decision processes","year":"2012","author":"Moldovan","key":"ref4"},{"issue":"1","key":"ref5","first-page":"1437","article-title":"A comprehensive survey on safe reinforcement learning","volume":"16","author":"Garcia","year":"2015","journal-title":"Journal of Machine Learning Research"},{"journal-title":"Combating the compounding-error problem with a multi-step model","year":"2019","author":"Asadi","key":"ref6"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1016\/j.ejcon.2022.100666"},{"journal-title":"Constrained policy optimization","year":"2017","author":"Achiam","key":"ref8"},{"key":"ref9","article-title":"Reward constrained policy optimization","volume-title":"International Conference on Learning Representations","author":"Tessler","year":"2018"},{"journal-title":"Policy gradients beyond expectations: Conditional value-at-risk","year":"2014","author":"Tamar","key":"ref10"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-335-6.50021-0"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1613\/jair.1666"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1162\/NECO_a_00600"},{"journal-title":"Safe policy search for lifelong reinforcement learning with sublinear regret","year":"2015","author":"Ammar","key":"ref14"},{"journal-title":"Safe exploration in finite markov decision processes with gaussian processes","year":"2016","author":"Turchetta","key":"ref15"},{"journal-title":"Leave no trace: Learning to reset for safe and autonomous reinforcement learning","year":"2017","author":"Eysenbach","key":"ref16"},{"key":"ref17","first-page":"1037","article-title":"Smart exploration in reinforcement learning using absolute temporal difference errors","volume-title":"Proceedings of the 2013 International Conference on Autonomous Agents and Multi-Agent Systems","author":"Gehring","year":"2013"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/1102351.1102352"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3135569"},{"key":"ref20","first-page":"1357","article-title":"Robot reinforcement learning on the constraint manifold","volume-title":"Conference on Robot Learning","author":"Liu"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2018.07.004"},{"journal-title":"Control barrier functions with unmodeled dynamics using integral quadratic constraints","year":"2021","author":"Seiler","key":"ref22"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ojcsys.2023.3256305"},{"journal-title":"Safe exploration in continuous action spaces","author":"Dalal","key":"ref24"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3192205"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2021.109597"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9780511546877"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.3390\/s20020426"},{"journal-title":"On the role of planning in model-based deep reinforcement learning","year":"2020","author":"Hamrick","key":"ref29"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3190100"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3063989"},{"key":"ref32","article-title":"Safe reinforcement learning using advantage-based intervention","author":"Wagener","year":"2021","journal-title":"arXiv preprint"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/BF02684450"},{"volume-title":"Reachability analysis and its application to the safety assessment of autonomous cars","year":"2010","author":"Althoff","key":"ref34"},{"key":"ref35","first-page":"163","article-title":"Data-driven reachability analysis using matrix zonotopes","volume-title":"Learning for Dynamics and Control","author":"Alanwar","year":"2021"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1177\/0278364920943266"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2022.3154715"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8793905"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1177\/0278364920950795"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2019.8917052"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/tac.2023.3257167"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1145\/3387168.3387199"}],"event":{"name":"2022 5th International Conference on Communications, Signal Processing, and their Applications (ICCSPA)","start":{"date-parts":[[2022,12,27]]},"location":"Cairo, Egypt","end":{"date-parts":[[2022,12,29]]}},"container-title":["2022 5th International Conference on Communications, Signal Processing, and their Applications (ICCSPA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10018924\/10018967\/10018994.pdf?arnumber=10018994","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,13]],"date-time":"2024-02-13T05:55:12Z","timestamp":1707803712000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10018994\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,12,27]]},"references-count":42,"URL":"https:\/\/doi.org\/10.1109\/iccspa55860.2022.10018994","relation":{},"subject":[],"published":{"date-parts":[[2022,12,27]]}}}