{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,24]],"date-time":"2025-10-24T08:26:21Z","timestamp":1761294381059,"version":"3.28.0"},"reference-count":36,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,5,30]],"date-time":"2021-05-30T00:00:00Z","timestamp":1622332800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,5,30]],"date-time":"2021-05-30T00:00:00Z","timestamp":1622332800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,5,30]]},"DOI":"10.1109\/icra48506.2021.9561147","type":"proceedings-article","created":{"date-parts":[[2021,10,20]],"date-time":"2021-10-20T00:28:35Z","timestamp":1634689715000},"page":"10617-10624","source":"Crossref","is-referenced-by-count":4,"title":["FISAR: Forward Invariant Safe Reinforcement Learning with a Deep Neural Network-Based Optimizer"],"prefix":"10.1109","author":[{"given":"Chuangchuang","family":"Sun","sequence":"first","affiliation":[{"name":"Massachusetts Institute of Technology,Laboratory for Information & Decision Systems (LIDS),Cambridge,MA,02139"}]},{"given":"Dong-Ki","family":"Kim","sequence":"additional","affiliation":[{"name":"Massachusetts Institute of Technology,Laboratory for Information & Decision Systems (LIDS),Cambridge,MA,02139"}]},{"given":"Jonathan P.","family":"How","sequence":"additional","affiliation":[{"name":"Massachusetts Institute of Technology,Laboratory for Information & Decision Systems (LIDS),Cambridge,MA,02139"}]}],"member":"263","reference":[{"article-title":"Safe exploration in continuous action spaces","year":"2018","author":"dalal","key":"ref33"},{"key":"ref32","article-title":"A method of solving a convex programming problem with convergence rate O(1\/k2)","volume":"27","author":"nesterov","year":"0","journal-title":"Soviet Mathematics Dokl"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"},{"article-title":"Adam: A method for stochastic optimization","year":"2014","author":"kingma","key":"ref30"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1137\/17M1147214"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1145\/227683.227684"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2013.2297683"},{"key":"ref10","volume":"87","author":"nesterov","year":"2013","journal-title":"Introductory Lectures on Convex Optimization A Basic Course"},{"key":"ref11","first-page":"8092","article-title":"A lyapunov-based approach to safe reinforcement learning","author":"chow","year":"2018","journal-title":"Advances in neural information processing systems"},{"key":"ref12","first-page":"908","article-title":"Safe model-based reinforcement learning with stability guarantees","author":"berkenkamp","year":"2017","journal-title":"Advances in neural information processing systems"},{"article-title":"The lyapunov neural network: Adaptive stability certification for safe learning of dynamical systems","year":"2018","author":"richards","key":"ref13"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2016.2638961"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33013387"},{"article-title":"Safe reinforcement learning of control-affine systems with vertex networks","year":"2020","author":"zheng","key":"ref16"},{"key":"ref17","doi-asserted-by":"crossref","DOI":"10.15607\/RSS.2020.XVI.088","article-title":"Reinforcement learning for safety-critical control under model uncertainty, using control lyapunov functions and control barrier functions","author":"choi","year":"2020"},{"article-title":"Safe reinforcement learning via shielding","year":"2017","author":"alshiekh","key":"ref18"},{"key":"ref19","doi-asserted-by":"crossref","DOI":"10.1609\/aaai.v32i1.12107","article-title":"Safe reinforcement learning via formal methods","author":"fulton","year":"2018","journal-title":"AAAI Conference on Artificial Intelligence"},{"journal-title":"Reinforcement Learning An Introduction","year":"2018","author":"sutton","key":"ref4"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1007\/978-0-8176-4606-6"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989385"},{"key":"ref27","first-page":"1057","article-title":"Policy gradient methods for reinforcement learning with function approximation","author":"sutton","year":"2000","journal-title":"Advances in neural information processing systems"},{"key":"ref6","first-page":"6070","article-title":"Risk-constrained reinforcement learning with percentile risk criteria","volume":"18","author":"chow","year":"2017","journal-title":"The Journal of Machine Learning Research"},{"key":"ref5","volume":"7","author":"altman","year":"1999","journal-title":"Constrained Markov Decision Processes"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1016\/S0005-1098(99)00113-2"},{"key":"ref8","first-page":"22","article-title":"Constrained policy optimization","author":"achiam","year":"2017","journal-title":"Proceedings of the 34th International Conference on Machine Learning-Volume 70"},{"article-title":"Responsive safety in reinforcement learning by pid lagrangian methods","year":"2020","author":"stooke","key":"ref7"},{"key":"ref2","first-page":"1334","article-title":"End-to-end training of deep visuomotor policies","volume":"17","author":"levine","year":"2016","journal-title":"J Mach Learn Res"},{"key":"ref1","article-title":"High-dimensional continuous control using generalized advantage estimation","author":"schulman","year":"2016","journal-title":"Proceedings of the International Conference on Learning Representations (ICLR)"},{"key":"ref9","first-page":"3121","article-title":"Convergent policy optimization for safe reinforcement learning","author":"yu","year":"2019","journal-title":"Advances in neural information processing systems"},{"article-title":"Safe reinforcement learning via curriculum induction","year":"2020","author":"turchetta","key":"ref20"},{"key":"ref22","first-page":"3981","article-title":"Learning to learn by gradient descent by gradient descent","author":"andrychowicz","year":"2016","journal-title":"Advances in neural information processing systems"},{"key":"ref21","first-page":"1437","article-title":"A comprehensive survey on safe reinforcement learning","volume":"16","author":"garcía","year":"2015","journal-title":"Journal of Machine Learning Research"},{"article-title":"Meta-sgd: Learning to learn quickly for few-shot learning","year":"2017","author":"li","key":"ref24"},{"key":"ref23","first-page":"748","article-title":"Learning to learn without gradient descent by gradient descent","volume":"70","author":"chen","year":"2017","journal-title":"Proceedings of the 34th International Conference on Machine Learning"},{"article-title":"Meta-learning in neural networks: A survey","year":"2020","author":"hospedales","key":"ref26"},{"article-title":"Learned optimizers that scale and generalize","year":"2017","author":"wichrowska","key":"ref25"}],"event":{"name":"2021 IEEE International Conference on Robotics and Automation (ICRA)","start":{"date-parts":[[2021,5,30]]},"location":"Xi'an, China","end":{"date-parts":[[2021,6,5]]}},"container-title":["2021 IEEE International Conference on Robotics and Automation (ICRA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9560720\/9560666\/09561147.pdf?arnumber=9561147","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,1,12]],"date-time":"2023-01-12T22:49:44Z","timestamp":1673563784000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9561147\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,5,30]]},"references-count":36,"URL":"https:\/\/doi.org\/10.1109\/icra48506.2021.9561147","relation":{},"subject":[],"published":{"date-parts":[[2021,5,30]]}}}