{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,2]],"date-time":"2026-01-02T07:47:33Z","timestamp":1767340053703,"version":"3.28.0"},"reference-count":42,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,5,13]],"date-time":"2024-05-13T00:00:00Z","timestamp":1715558400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,5,13]],"date-time":"2024-05-13T00:00:00Z","timestamp":1715558400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000001","name":"National Science Foundation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100006831","name":"United States Air Force","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100006831","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,5,13]]},"DOI":"10.1109\/icra57147.2024.10610047","type":"proceedings-article","created":{"date-parts":[[2024,8,8]],"date-time":"2024-08-08T17:51:05Z","timestamp":1723139465000},"page":"2845-2851","source":"Crossref","is-referenced-by-count":2,"title":["Reinforcement Learning in a Safety-Embedded MDP with Trajectory Optimization"],"prefix":"10.1109","author":[{"given":"Fan","family":"Yang","sequence":"first","affiliation":[{"name":"Carnegie Mellon University,Pittsburgh,PA,USA,15213"}]},{"given":"Wenxuan","family":"Zhou","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University,Pittsburgh,PA,USA,15213"}]},{"given":"Zuxin","family":"Liu","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University,Pittsburgh,PA,USA,15213"}]},{"given":"Ding","family":"Zhao","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University,Pittsburgh,PA,USA,15213"}]},{"given":"David","family":"Held","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University,Pittsburgh,PA,USA,15213"}]}],"member":"263","reference":[{"article-title":"Playing atari with deep reinforcement learning","year":"2013","author":"Mnih","key":"ref1"},{"article-title":"Solving rubik\u2019s cube with a robot hand","year":"2019","author":"Akkaya","key":"ref2"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1177\/0278364919887447"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2021.3054625"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1080\/01691864.2017.1365009"},{"issue":"1","key":"ref6","first-page":"1334","article-title":"End-to-end training of deep visuomotor policies","volume":"17","author":"Levine","year":"2016","journal-title":"The Journal of Machine Learning Research"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1201\/9781315140223"},{"key":"ref8","first-page":"22","article-title":"Constrained policy optimization","volume-title":"International Conference on Machine Learning","author":"Achiam"},{"key":"ref9","article-title":"Benchmarking safe exploration in deep reinforcement learning","volume":"7","author":"Ray","year":"2019"},{"article-title":"Constrained policy optimization via bayesian world models","year":"2022","author":"As","key":"ref10"},{"key":"ref11","first-page":"13644","article-title":"Constrained variational policy optimization for safe reinforcement learning","volume-title":"International Conference on Machine 
Learning","author":"Liu"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8460547"},{"article-title":"Safe exploration in continuous action spaces","year":"2018","author":"Dalal","key":"ref13"},{"article-title":"Model-based safe deep reinforcement learning via a constrained proximal policy optimization algorithm","year":"2022","author":"Jayant","key":"ref14"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11797"},{"article-title":"Safe model-based reinforcement learning with stability guarantees","year":"2017","author":"Berkenkamp","key":"ref16"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2018.8619572"},{"key":"ref18","first-page":"8378","article-title":"Natural policy gradient primal-dual method for constrained markov decision processes","volume":"33","author":"Ding","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"article-title":"Value constrained model-free continuous control","year":"2019","author":"Bohez","key":"ref19"},{"article-title":"A lyapunov-based approach to safe reinforcement learning","year":"2018","author":"Chow","key":"ref20"},{"key":"ref21","first-page":"11480","article-title":"Crpo: A new approach for safe reinforcement learning with convergence guarantee","volume-title":"International Conference on Machine Learning","author":"Xu"},{"article-title":"Projection-based constrained policy optimization","year":"2020","author":"Yang","key":"ref22"},{"key":"ref23","article-title":"Ara*: Anytime a* with provable bounds on sub-optimality","volume":"16","author":"Likhachev","year":"2003","journal-title":"Advances in neural information processing systems"},{"key":"ref24","first-page":"476","article-title":"D\u02c6* lite","volume":"15","author":"Koenig","year":"2002","journal-title":"Aaai\/iaai"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1177\/027836499801700706"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58292-0_160929"},{"article-title":"Calipso: A differentiable solver for trajectory optimization with conic and complementarity constraints","year":"2022","author":"Howell","key":"ref27"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3152696"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA40945.2020.9197260"},{"article-title":"Learning to optimize","year":"2016","author":"Li","key":"ref30"},{"key":"ref31","article-title":"Differentiable mpc for end-to-end planning and control","volume":"31","author":"Amos","year":"2018","journal-title":"Advances in neural information processing systems"},{"article-title":"Risk-averse zero-order trajectory optimization","volume-title":"5th Annual Conference on Robot Learning","author":"Vlastelica","key":"ref32"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3193497"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9561315"},{"article-title":"Integrating task-motion planning with reinforcement learning for robust decision making in mobile robots","year":"2018","author":"Jiang","key":"ref35"},{"key":"ref36","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"International conference on machine 
learning","author":"Haarnoja"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2014.x.001"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1177\/0278364918790369"},{"issue":"67-68","key":"ref39","first-page":"7","article-title":"Numerical optimization","volume":"35","author":"Wright","year":"1999","journal-title":"Springer Science"},{"article-title":"Theseus: A library for differentiable nonlinear optimization","year":"2022","author":"Pineda","key":"ref40"},{"article-title":"Towards safe reinforcement learning with a safety editor policy","year":"2022","author":"Yu","key":"ref41"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"}],"event":{"name":"2024 IEEE International Conference on Robotics and Automation (ICRA)","start":{"date-parts":[[2024,5,13]]},"location":"Yokohama, Japan","end":{"date-parts":[[2024,5,17]]}},"container-title":["2024 IEEE International Conference on Robotics and Automation (ICRA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10609961\/10609862\/10610047.pdf?arnumber=10610047","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,10]],"date-time":"2024-08-10T05:15:58Z","timestamp":1723266958000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10610047\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,5,13]]},"references-count":42,"URL":"https:\/\/doi.org\/10.1109\/icra57147.2024.10610047","relation":{},"subject":[],"published":{"date-parts":[[2024,5,13]]}}}