{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,5]],"date-time":"2026-03-05T15:30:45Z","timestamp":1772724645335,"version":"3.50.1"},"reference-count":28,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,6,8]],"date-time":"2022-06-08T00:00:00Z","timestamp":1654646400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,6,8]],"date-time":"2022-06-08T00:00:00Z","timestamp":1654646400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100000923","name":"Australian Research Council","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100000923","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,6,8]]},"DOI":"10.23919\/acc53348.2022.9867842","type":"proceedings-article","created":{"date-parts":[[2022,9,5]],"date-time":"2022-09-05T20:24:10Z","timestamp":1662409450000},"page":"2116-2123","source":"Crossref","is-referenced-by-count":11,"title":["Youla-REN: Learning Nonlinear Feedback Policies with Robust Stability Guarantees"],"prefix":"10.23919","author":[{"given":"Ruigang","family":"Wang","sequence":"first","affiliation":[{"name":"The University of Sydney,Australian Centre for Field Robotics,Sydney,Australia,NSW 2006"}]},{"given":"Ian R.","family":"Manchester","sequence":"additional","affiliation":[{"name":"The University of Sydney,Australian Centre for Field Robotics,Sydney,Australia,NSW 2006"}]}],"member":"263","reference":[{"key":"ref10","first-page":"708","article-title":"Learning for safety-critical control with control barrier functions","author":"taylor","year":"2020","journal-title":"Learning for Dynamics and Control"},{"key":"ref11","article-title":"Recurrent neural network controllers synthesis with stability guarantees for partially observed systems","author":"gu","year":"2021"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1002\/rnc.670"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ADPRL.2011.5967370"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.1976.1101223"},{"key":"ref15","volume":"7","author":"boyd","year":"0","journal-title":"Linear Controller Design Limits of Performance"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4471-0507-7"},{"key":"ref17","article-title":"Recurrent equilibrium networks: Flexible dynamic models with guaranteed stability and robustness","author":"revay","year":"2021"},{"key":"ref18","first-page":"690","article-title":"Deep equilibrium models","author":"bai","year":"2019","journal-title":"Advances in neural information processing systems"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1137\/20M1358517"},{"key":"ref28","article-title":"Adam: A Method for Stochastic Optimization","author":"kingma","year":"2017","journal-title":"International Conference on Learning Representations (ICLR)"},{"key":"ref4","article-title":"Safe learning in robotics: From learning-based control to safe reinforcement learning","author":"brunke","year":"2021"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1016\/0364-0213(90)90002-E"},{"key":"ref3","article-title":"Concrete problems in AI safety","author":"amodei","year":"2016"},{"key":"ref6","article-title":"Deep lyapunov function: Automatic stability analysis for dynamical systems","author":"mehrjou","year":"2019"},{"key":"ref5","first-page":"3245","article-title":"Neural Lyapunov control","author":"chang","year":"2019","journal-title":"Proceedings of the 33rd International Conference on Neural Information Processing Systems"},{"key":"ref8","article-title":"H infinity model-free reinforcement learning with robust stability guarantee","author":"han","year":"2019"},{"key":"ref7","article-title":"Safe model-based reinforcement learning with stability guarantees","author":"berkenkamp","year":"2017"},{"key":"ref2","author":"sutton","year":"2018","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2021.XVII.063"},{"key":"ref1","doi-asserted-by":"crossref","first-page":"354","DOI":"10.1038\/nature24270","article-title":"Mastering the game of go without human knowledge","volume":"550","author":"silver","year":"2017","journal-title":"Nature"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/9.587335"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2017.2668380"},{"key":"ref21","volume":"40","author":"zhou","year":"1996","journal-title":"Robust and Optimal Control"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/LCSYS.2018.2836355"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1002\/rnc.5559"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"},{"key":"ref25","article-title":"Reduced-order nonlinear observers via contraction analysis and convex optimization","author":"yi","year":"2021","journal-title":"IEEE Transactions on Automatic Control"}],"event":{"name":"2022 American Control Conference (ACC)","location":"Atlanta, GA, USA","start":{"date-parts":[[2022,6,8]]},"end":{"date-parts":[[2022,6,10]]}},"container-title":["2022 American Control Conference (ACC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9866948\/9867142\/09867842.pdf?arnumber=9867842","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,10,3]],"date-time":"2022-10-03T20:37:57Z","timestamp":1664829477000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9867842\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,6,8]]},"references-count":28,"URL":"https:\/\/doi.org\/10.23919\/acc53348.2022.9867842","relation":{},"subject":[],"published":{"date-parts":[[2022,6,8]]}}}