{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,5]],"date-time":"2025-06-05T04:15:32Z","timestamp":1749096932140,"version":"3.41.0"},"reference-count":69,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"6","license":[{"start":{"date-parts":[[2025,6,1]],"date-time":"2025-06-01T00:00:00Z","timestamp":1748736000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,6,1]],"date-time":"2025-06-01T00:00:00Z","timestamp":1748736000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,1]],"date-time":"2025-06-01T00:00:00Z","timestamp":1748736000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100000038","name":"Canada CIFAR AI Chairs Program, the Natural Sciences and Engineering Research Council of Canada","doi-asserted-by":"publisher","award":["RGPIN-2021-02549","RGPAS-2021-00034","DGECR-2021-00019"],"award-info":[{"award-number":["RGPIN-2021-02549","RGPAS-2021-00034","DGECR-2021-00019"]}],"id":[{"id":"10.13039\/501100000038","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100020959","name":"JST-Mirai Program","doi-asserted-by":"publisher","award":["JPMJMI20B8"],"award-info":[{"award-number":["JPMJMI20B8"]}],"id":[{"id":"10.13039\/501100020959","id-type":"DOI","asserted-by":"publisher"}]},{"name":"JSPS KAKENHI","award":["JP21H04877","JP23H03372","JP24K02920"],"award-info":[{"award-number":["JP21H04877","JP23H03372","JP24K02920"]}]},{"DOI":"10.13039\/100006639","name":"Autoware Foundation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100006639","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2025,6]]},"DOI":"10.1109\/tnnls.2024.3496492","type":"journal-article","created":{"date-parts":[[2024,11,25]],"date-time":"2024-11-25T18:48:00Z","timestamp":1732560480000},"page":"10678-10692","source":"Crossref","is-referenced-by-count":0,"title":["GenSafe: A Generalizable Safety Enhancer for Safe Reinforcement Learning Algorithms Based on Reduced Order Markov Decision Process Model"],"prefix":"10.1109","volume":"36","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-9542-4858","authenticated-orcid":false,"given":"Zhehua","family":"Zhou","sequence":"first","affiliation":[{"name":"Department of Electrical and Computer Engineering, University of Alberta, Edmonton, AB, Canada"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3981-8515","authenticated-orcid":false,"given":"Xuan","family":"Xie","sequence":"additional","affiliation":[{"name":"Department of Electrical and Computer Engineering, University of Alberta, Edmonton, AB, Canada"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-7093-9781","authenticated-orcid":false,"given":"Jiayang","family":"Song","sequence":"additional","affiliation":[{"name":"Department of Electrical and Computer Engineering, University of Alberta, Edmonton, AB, Canada"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5933-254X","authenticated-orcid":false,"given":"Zhan","family":"Shu","sequence":"additional","affiliation":[{"name":"Department of Electrical and Computer Engineering, University of Alberta, Edmonton, AB, Canada"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8621-2420","authenticated-orcid":false,"given":"Lei","family":"Ma","sequence":"additional","affiliation":[{"name":"Department of Electrical and Computer Engineering, University of Alberta, Edmonton, AB, Canada"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.3390\/robotics10010022"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2021.3054625"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.3390\/electronics10090999"},{"issue":"1","key":"ref4","first-page":"1437","article-title":"A comprehensive survey on safe reinforcement learning","volume":"16","author":"Garc\u0131a","year":"2015","journal-title":"J. Mach. Learn. Res."},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2022.03.003"},{"key":"ref6","first-page":"1329","article-title":"Benchmarking deep reinforcement learning for continuous control","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Duan"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2020.2992981"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1146\/annurev-control-042920-020211"},{"key":"ref9","article-title":"A review of safe reinforcement learning: Methods, theory and applications","volume-title":"arXiv:2205.10330","author":"Gu","year":"2022"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1201\/9781315140223"},{"key":"ref11","first-page":"9797","article-title":"Safe reinforcement learning in constrained Markov decision processes","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Wachi"},{"key":"ref12","article-title":"Benchmarking batch deep reinforcement learning algorithms","volume-title":"arXiv:1910.01708","author":"Fujimoto","year":"2019"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-39415-8"},{"key":"ref14","article-title":"Constrained model-based reinforcement learning with robust cross-entropy method","volume-title":"arXiv:2010.07968","author":"Liu","year":"2020"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/s10994-021-06103-6"},{"key":"ref16","first-page":"13859","article-title":"Safe reinforcement learning by imagining the near future","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Thomas"},{"key":"ref17","first-page":"24432","article-title":"Model-based safe deep reinforcement learning via a constrained proximal policy optimization algorithm","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Jayant"},{"key":"ref18","first-page":"1622","article-title":"Learning off-policy with online planning","volume-title":"Proc. Conf. Robot Learn.","author":"Sikchi"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i5.20478"},{"key":"ref20","article-title":"Constrained policy optimization via Bayesian world models","volume-title":"arXiv:2201.09802","author":"As","year":"2022"},{"key":"ref21","first-page":"7461","article-title":"Constrained cross-entropy method for safe reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"31","author":"Wen"},{"key":"ref22","first-page":"22","article-title":"Constrained policy optimization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Achiam"},{"key":"ref23","article-title":"Reward constrained policy optimization","volume-title":"arXiv:1805.11074","author":"Tessler","year":"2018"},{"key":"ref24","first-page":"3121","article-title":"Convergent policy optimization for safe reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Yu"},{"key":"ref25","first-page":"15338","article-title":"First order constrained optimization in policy space","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Zhang"},{"key":"ref26","doi-asserted-by":"crossref","DOI":"10.1007\/978-3-540-78841-6","volume-title":"Model Order Reduction: Theory, Research Aspects and Applications","volume":"13","author":"Schilders","year":"2008"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3106818"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA40945.2020.9197004"},{"key":"ref29","article-title":"Safe exploration in continuous action spaces","volume-title":"arXiv:1801.08757","author":"Dalal","year":"2018"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2018.2876389"},{"key":"ref31","first-page":"8103","article-title":"A Lyapunov-based approach to safe reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Chow"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2020.3024161"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2002.1184811"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1016\/j.sysconle.2004.08.007"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1007\/s10957-012-9989-5"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3236361"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2023\/763"},{"key":"ref38","first-page":"53239","article-title":"Sample-efficient and safe deep reinforcement learning via reset deep ensemble agents","volume-title":"Proc. 37th Int. Conf. Neural Inf. Process. Syst.","author":"Kim"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA46639.2022.9811604"},{"key":"ref40","article-title":"Proximal policy optimization algorithms","volume-title":"arXiv:1707.06347","author":"Schulman","year":"2017"},{"key":"ref41","first-page":"1889","article-title":"Trust region policy optimization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Schulman"},{"volume-title":"Constrained Optimization and Lagrange Multiplier Methods","year":"2014","author":"Bertsekas","key":"ref42"},{"key":"ref43","article-title":"Projection-based constrained policy optimization","volume-title":"arXiv:2010.03152","author":"Yang","year":"2020"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1007\/s10994-022-06187-8"},{"key":"ref45","first-page":"9111","article-title":"Constrained update projection approach to safe policy optimization","volume-title":"Proc. 36th Int. Conf. Neural Inf. Process. Syst.","author":"Yang"},{"key":"ref46","article-title":"High-dimensional continuous control using generalized advantage estimation","volume-title":"arXiv:1506.02438","author":"Schulman","year":"2015"},{"key":"ref47","first-page":"11480","article-title":"CRPO: A new approach for safe reinforcement learning with convergence guarantee","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Xu"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5932"},{"key":"ref49","first-page":"465","article-title":"PILCO: A model-based and data-efficient approach to policy search","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Deisenroth"},{"key":"ref50","first-page":"3509","article-title":"Algorithms for CVaR optimization in MDPs","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"27","author":"Chow"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/ECC.2015.7330913"},{"key":"ref52","first-page":"908","article-title":"Safe model-based reinforcement learning with stability guarantees","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Berkenkamp"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8793611"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2022.3213566"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2023.3274908"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3098985"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-24853-0"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2013.02.003"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8794107"},{"issue":"11","key":"ref60","first-page":"2579","article-title":"Visualizing data using t-SNE","volume":"9","author":"Van der Maaten","year":"2008","journal-title":"J. Mach. Learn. Res."},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1002\/wics.101"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1007\/978-0-387-73003-5_196"},{"key":"ref63","volume-title":"Mixture Models: Inference and Applications to Clustering","volume":"38","author":"McLachlan","year":"1988"},{"volume-title":"Reinforcement Learning: An Introduction","year":"2018","author":"Sutton","key":"ref64"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/ICNN.1995.488968"},{"key":"ref66","article-title":"Safety-gymnasium: A unified safe reinforcement learning benchmark","volume-title":"arXiv:2310.12567","author":"Ji","year":"2023"},{"key":"ref67","article-title":"Towards a unified policy abstraction theory and representation learning approach in Markov decision processes","volume-title":"arXiv:2209.07696","author":"Zhang","year":"2022"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00947"},{"key":"ref69","article-title":"Representation learning for online and offline RL in low-rank MDPs","volume-title":"arXiv:2110.04652","author":"Uehara","year":"2021"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/5962385\/11022714\/10766903.pdf?arnumber=10766903","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,4]],"date-time":"2025-06-04T17:57:52Z","timestamp":1749059872000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10766903\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6]]},"references-count":69,"journal-issue":{"issue":"6"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2024.3496492","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"type":"print","value":"2162-237X"},{"type":"electronic","value":"2162-2388"}],"subject":[],"published":{"date-parts":[[2025,6]]}}}