{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,4,30]],"date-time":"2025-04-30T04:49:40Z","timestamp":1745988580695},"reference-count":28,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,10,24]],"date-time":"2020-10-24T00:00:00Z","timestamp":1603497600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,10,24]],"date-time":"2020-10-24T00:00:00Z","timestamp":1603497600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,10,24]],"date-time":"2020-10-24T00:00:00Z","timestamp":1603497600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,10,24]]},"DOI":"10.1109\/iros45743.2020.9341606","type":"proceedings-article","created":{"date-parts":[[2021,2,13]],"date-time":"2021-02-13T02:26:48Z","timestamp":1613183208000},"page":"5994-6001","source":"Crossref","is-referenced-by-count":8,"title":["A Framework for Online Updates to Safe Sets for Uncertain Dynamics"],"prefix":"10.1109","author":[{"given":"Jennifer C.","family":"Shih","sequence":"first","affiliation":[]},{"given":"Franziska","family":"Meier","sequence":"additional","affiliation":[]},{"given":"Akshara","family":"Rai","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"article-title":"Safe model-based reinforcement learning with stability guarantees","year":"0","author":"berkenkamp","key":"ref10"},{"key":"ref11","first-page":"8092","article-title":"A lyapunov-based approach to safe reinforcement learning","author":"chow","year":"2018","journal-title":"Advances in neural information processing systems"},{"article-title":"Lyapunov-based safe policy optimization for continuous control","year":"2019","author":"chow","key":"ref12"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2018.2797194"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2017.8264092"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2016.7798509"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.23919\/ACC.2017.7963818"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2017.8263977"},{"key":"ref18","article-title":"A Toolbox of Level Set Methods","author":"mitchell","year":"2007","journal-title":"UBC Department of Computer Science Technical Report TR-2007-11"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CDC40024.2019.9030133"},{"journal-title":"KUKA LBR iiwa","year":"0","key":"ref28"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8794107"},{"article-title":"Improving sample efficiency in model-free reinforcement learning from images","year":"2019","author":"yarats","key":"ref27"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2018.2876389"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8460471"},{"journal-title":"Reinforcement Learning An Introduction","year":"2018","author":"sutton","key":"ref5"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2018.8619829"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33013387"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8793919"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2018.8619572"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2005.851439"},{"key":"ref20","first-page":"4754","article-title":"Deep reinforcement learning in a handful of trials using probabilistic dynamics models","author":"chua","year":"2018","journal-title":"Advances in neural information processing systems"},{"article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","year":"2017","author":"haarnoja","key":"ref22"},{"key":"ref21","first-page":"162","article-title":"Curious ilqr: Resolving uncertainty in model-based rl","author":"bechtle","year":"2020","journal-title":"Conference on Robot Learning"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2010.08.011"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.1985.268648"},{"article-title":"Learning-based model predictive control for safe exploration and reinforcement learning","year":"2019","author":"koller","key":"ref26"},{"journal-title":"Pattern Recognition and Machine Learning","year":"2007","author":"bishop","key":"ref25"}],"event":{"name":"2020 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","start":{"date-parts":[[2020,10,24]]},"location":"Las Vegas, NV, USA","end":{"date-parts":[[2021,1,24]]}},"container-title":["2020 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9340668\/9340635\/09341606.pdf?arnumber=9341606","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,28]],"date-time":"2022-06-28T21:57:04Z","timestamp":1656453424000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9341606\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,10,24]]},"references-count":28,"URL":"https:\/\/doi.org\/10.1109\/iros45743.2020.9341606","relation":{},"subject":[],"published":{"date-parts":[[2020,10,24]]}}}