{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,18]],"date-time":"2025-12-18T12:41:37Z","timestamp":1766061697995,"version":"3.48.0"},"reference-count":40,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100002858","name":"China Postdoctoral Science Foundation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100002858","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100002858","name":"China Postdoctoral Science Foundation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100002858","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100012554","name":"Hubei Provincial Department of Education","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100012554","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,19]]},"DOI":"10.1109\/iros60139.2025.11247653","type":"proceedings-article","created":{"date-parts":[[2025,11,27]],"date-time":"2025-11-27T18:54:45Z","timestamp":1764269685000},"page":"2360-2367","source":"Crossref","is-referenced-by-count":0,"title":["A Safety-Adjusted Policy Optimization Algorithm and Application for Obstacle Avoidance in the Quadcopter"],"prefix":"10.1109","author":[{"given":"Gang","family":"Xia","sequence":"first","affiliation":[{"name":"Sichuan University,College of Electronics and Information Engineering,Chengdu,China,610065"}]},{"given":"Xinsong","family":"Yang","sequence":"additional","affiliation":[{"name":"Sichuan University,College of Electronics and Information Engineering,Chengdu,China,610065"}]},{"given":"Qihan","family":"Qi","sequence":"additional","affiliation":[{"name":"Sichuan University,College of Electronics and Information Engineering,Chengdu,China,610065"}]},{"given":"Yaping","family":"Sun","sequence":"additional","affiliation":[{"name":"Sichuan University,College of Electronics and Information Engineering,Chengdu,China,610065"}]},{"given":"Xiwang","family":"Dong","sequence":"additional","affiliation":[{"name":"Institute of Artificial Intelligence, Beihang University,Beijing,China,100191"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1177\/0278364919887447"},{"article-title":"Dota 2 with large scale deep reinforcement learning","year":"2019","author":"Berner","key":"ref2"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1126\/science.aar6404"},{"key":"ref4","first-page":"326","article-title":"DATT: Deep adaptive trajectory tracking for quadrotor control","volume-title":"Conference on Robot Learning","author":"Huang"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3457538"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.23919\/ACC50511.2021.9483182"},{"key":"ref7","article-title":"Safe model-based reinforcement learning with stability guarantees","volume":"30","author":"Berkenkamp","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA40945.2020.9196867"},{"article-title":"Shortest-path constrained reinforcement learning for sparse reward tasks","year":"2021","author":"Sohn","key":"ref9"},{"key":"ref10","first-page":"10630","article-title":"Safe reinforcement learning using advantage-based intervention","volume-title":"International Conference on Machine Learning","author":"Wagener"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3070252"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i8.20855"},{"key":"ref13","first-page":"21611","article-title":"Constrained decision transformer for offline safe reinforcement learning","volume-title":"International Conference on Machine Learning","author":"Liu"},{"key":"ref14","first-page":"15338","article-title":"First order constrained optimization in policy space","volume":"33","author":"Zhang","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"article-title":"Projection-Based constrained policy optimization","volume-title":"8th International Conference on Learning Representations","author":"Yang","key":"ref15"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5932"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1201\/9781315140223"},{"key":"ref18","first-page":"9797","article-title":"Safe reinforcement learning in Constrained Markov decision processes","volume-title":"International Conference on Machine Learning","author":"Wachi"},{"key":"ref19","first-page":"1889","article-title":"Trust region policy optimization","volume-title":"Proceedings of the International Conference on Machine Learning","author":"Schulman"},{"key":"ref20","first-page":"22","article-title":"Constrained policy optimization","volume-title":"Proceedings of the International Conference on Machine Learning","author":"Achiam"},{"issue":"167","key":"ref21","first-page":"1","article-title":"Risk-constrained reinforcement learning with percentile risk criteria","volume":"18","author":"Chow","year":"2018","journal-title":"Journal of Machine Learning Research"},{"article-title":"Reward constrained policy optimization","year":"2018","author":"Tessler","key":"ref22"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2022\/520"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3062375"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.23919\/ACC60939.2024.10644548"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/GNCC42960.2018.9018854"},{"journal-title":"Reinforcement Learning: An Introduction","year":"2018","author":"Sutton","key":"ref27"},{"key":"ref28","first-page":"267","article-title":"Approximately optimal approximate reinforcement learning","volume-title":"Proceedings of the Nineteenth International Conference on Machine Learning","author":"Kakade"},{"article-title":"High-dimensional continuous control using generalized advantage estimation","year":"2018","author":"Schulman","key":"ref29"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.12794\/metadc1505267"},{"key":"ref31","first-page":"2608","article-title":"Towards safe reinforcement learning with a safety editor policy","volume":"35","author":"Yu","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"article-title":"Safe exploration in continuous action spaces","year":"2018","author":"Dalal","key":"ref32"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8460547"},{"key":"ref34","first-page":"1093","article-title":"Safe reinforcement learning using robust action governor","author":"Li","year":"2021","journal-title":"Learning for Dynamics and Control"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9561138"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2024.3356168"},{"issue":"268","key":"ref37","first-page":"1","article-title":"Stable-Baselines3: Reliable reinforcement learning implementations","volume":"22","author":"Raffin","year":"2021","journal-title":"Journal of Machine Learning Research"},{"issue":"285","key":"ref38","first-page":"1","article-title":"Omnisafe: An infrastructure for accelerating safe reinforcement learning research","volume":"25","author":"Ji","year":"2024","journal-title":"Journal of Machine Learning Research"},{"article-title":"Benchmarking safe exploration in deep reinforcement learning","year":"2020","author":"Ray","key":"ref39"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989376"}],"event":{"name":"2025 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","start":{"date-parts":[[2025,10,19]]},"location":"Hangzhou, China","end":{"date-parts":[[2025,10,25]]}},"container-title":["2025 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11245651\/11245652\/11247653.pdf?arnumber=11247653","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,18]],"date-time":"2025-12-18T12:38:54Z","timestamp":1766061534000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11247653\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,19]]},"references-count":40,"URL":"https:\/\/doi.org\/10.1109\/iros60139.2025.11247653","relation":{},"subject":[],"published":{"date-parts":[[2025,10,19]]}}}