{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,20]],"date-time":"2026-02-20T18:11:34Z","timestamp":1771611094424,"version":"3.50.1"},"reference-count":35,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,5,19]],"date-time":"2025-05-19T00:00:00Z","timestamp":1747612800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,5,19]],"date-time":"2025-05-19T00:00:00Z","timestamp":1747612800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,5,19]]},"DOI":"10.1109\/icra55743.2025.11128552","type":"proceedings-article","created":{"date-parts":[[2025,9,2]],"date-time":"2025-09-02T17:28:56Z","timestamp":1756834136000},"page":"10268-10274","source":"Crossref","is-referenced-by-count":4,"title":["Stage-Wise Reward Shaping for Acrobatic Robots: A Constrained Multi-Objective Reinforcement Learning Approach"],"prefix":"10.1109","author":[{"given":"Dohyeong","family":"Kim","sequence":"first","affiliation":[{"name":"Seoul National University,Department of Electrical and Computer Engineering and ASRI,Seoul,Korea,08826"}]},{"given":"Hyeokjin","family":"Kwon","sequence":"additional","affiliation":[{"name":"Interdisciplinary Program in Artificial Intelligence and ASRI, Seoul National University,Seoul,Korea,08826"}]},{"given":"Junseok","family":"Kim","sequence":"additional","affiliation":[{"name":"Seoul National University,Department of Electrical and Computer Engineering and ASRI,Seoul,Korea,08826"}]},{"given":"Gunmin","family":"Lee","sequence":"additional","affiliation":[{"name":"Seoul National University,Department of Electrical and Computer Engineering and 
ASRI,Seoul,Korea,08826"}]},{"given":"Songhwai","family":"Oh","sequence":"additional","affiliation":[{"name":"Seoul National University,Department of Electrical and Computer Engineering and ASRI,Seoul,Korea,08826"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.abc5986"},{"key":"ref2","article-title":"Walk these ways: Tuning robot control for generalization with multiplicity of behavior","volume-title":"Proceedings of Conference on Robot Learning","author":"Margolis","year":"2023"},{"key":"ref3","article-title":"Learning agile locomotion on risky terrains","author":"Zhang","year":"2023","journal-title":"arXiv preprint"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2021.xvii.011"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA46639.2022.9812166"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3151396"},{"key":"ref7","article-title":"Barkour: Benchmarking animal-level agility with quadruped robots","author":"Caluwaerts","year":"2023","journal-title":"arXiv preprint"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA57147.2024.10610200"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2021.XVII.061"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2023.xix.052"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/3197517.3201311"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/3528223.3530110"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2024.xx.103"},{"key":"ref14","article-title":"Scale-invariant gradient aggregation for constrained multi-objective reinforcement learning","author":"Kim","year":"2024","journal-title":"arXiv preprint"},{"key":"ref15","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv 
preprint"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2022\/520"},{"key":"ref17","article-title":"Pareto policy adaptation","volume-title":"Proceedings of International Conference on Learning Representations, 2022","author":"Kyriakis"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ADPRL.2013.6615007"},{"key":"ref19","article-title":"PD-MORL: Preference-driven multi-objective reinforcement learning algorithm","volume-title":"Proceedings of International Conference on Learning Representations, 2023","author":"Basaklar"},{"key":"ref20","article-title":"Prediction-guided multi-objective reinforcement learning for continuous robot control","volume-title":"Proceedings of International Conference on Machine Learning, 2020","author":"Xu"},{"key":"ref21","article-title":"Distributional pareto-optimal multi-objective reinforcement learning","journal-title":"Advances in Neural Information Processing Systems, 2023"},{"key":"ref22","article-title":"A distributional view on multi-objective policy optimization","volume-title":"Proceedings of International Conference on Machine Learning, 2020","author":"Abdolmaleki"},{"key":"ref23","article-title":"A generalized algorithm for multi-objective reinforcement learning and policy adaptation","author":"Yang","journal-title":"Advances in Neural Information Processing Systems, 2019"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/s10458-022-09552-y"},{"key":"ref25","article-title":"A constrained multi-objective reinforcement learning framework","volume-title":"Proceedings of Conference on Robot Learning","author":"Huang","year":"2022"},{"key":"ref26","article-title":"Responsive safety in reinforcement learning by PID Lagrangian methods","volume-title":"Proceedings of International Conference on Machine Learning","author":"Stooke","year":"2020"},{"key":"ref27","article-title":"Constrained policy optimization","volume-title":"Proceedings of International Conference on Machine 
Learning","author":"Achiam","year":"2017"},{"key":"ref28","article-title":"Isaac gym: High performance gpu based physics simulation for robot learning","author":"Makoviychuk","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1201\/9781315140223"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2024.3400935"},{"key":"ref31","article-title":"Evaluation of constrained reinforcement learning algorithms for legged locomotion","author":"Lee","year":"2023","journal-title":"arXiv preprint"},{"key":"ref32","article-title":"Multi-objective reinforcement learning: Convexity, stationarity and Pareto optimality","volume-title":"Proceedings of International Conference on Learning Representations","author":"Lu","year":"2023"},{"key":"ref33","article-title":"Trust region policy optimization","volume-title":"Proceedings of International Conference on Machine Learning","author":"Schulman","year":"2015"},{"key":"ref34","article-title":"Legged locomotion in challenging terrains using egocentric vision","volume-title":"Proceedings of Conference on Robot Learning","author":"Agarwal","year":"2023"},{"key":"ref35","article-title":"unitree_ros","volume-title":"GitHub repository","author":"Robotics","year":"2024"}],"event":{"name":"2025 IEEE International Conference on Robotics and Automation (ICRA)","location":"Atlanta, GA, USA","start":{"date-parts":[[2025,5,19]]},"end":{"date-parts":[[2025,5,23]]}},"container-title":["2025 IEEE International Conference on Robotics and Automation 
(ICRA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11127273\/11127223\/11128552.pdf?arnumber=11128552","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,3]],"date-time":"2025-09-03T06:08:12Z","timestamp":1756879692000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11128552\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5,19]]},"references-count":35,"URL":"https:\/\/doi.org\/10.1109\/icra55743.2025.11128552","relation":{},"subject":[],"published":{"date-parts":[[2025,5,19]]}}}