{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T16:22:29Z","timestamp":1776183749146,"version":"3.50.1"},"reference-count":46,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,11,28]],"date-time":"2022-11-28T00:00:00Z","timestamp":1669593600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,11,28]],"date-time":"2022-11-28T00:00:00Z","timestamp":1669593600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,11,28]]},"DOI":"10.1109\/humanoids53995.2022.10000148","type":"proceedings-article","created":{"date-parts":[[2023,1,5]],"date-time":"2023-01-05T19:08:26Z","timestamp":1672945706000},"page":"405-412","source":"Crossref","is-referenced-by-count":5,"title":["Adapting Object-Centric Probabilistic Movement Primitives with Residual Reinforcement Learning"],"prefix":"10.1109","author":[{"given":"Joao","family":"Carvalho","sequence":"first","affiliation":[{"name":"Institute for Intelligent Autonomous Systems,TU Darmstadt (TUDa),Computer Science Department"}]},{"given":"Dorothea","family":"Koert","sequence":"additional","affiliation":[{"name":"Institute for Intelligent Autonomous Systems,TU Darmstadt (TUDa),Computer Science Department"}]},{"given":"Marek","family":"Daniv","sequence":"additional","affiliation":[{"name":"Institute for Intelligent Autonomous Systems,TU Darmstadt (TUDa),Computer Science Department"}]},{"given":"Jan","family":"Peters","sequence":"additional","affiliation":[{"name":"Institute for Intelligent Autonomous Systems,TU Darmstadt (TUDa),Computer Science Department"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-30301-5_60"},{"issue":"30","key":"ref2","article-title":"A review of robot learning for manipulation: Challenges, representations, and algorithms","volume":"22","author":"Kroemer","year":"2021","journal-title":"Journal of machine learning research"},{"key":"ref3","article-title":"Residual policy learning","author":"Silver","year":"2018","journal-title":"arXiv preprint"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8794127"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/IROS45743.2020.9341714"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3150024"},{"key":"ref7","article-title":"Robot skill adaptation via soft actor-critic gaussian mixture models","author":"Nematollahi","year":"2021","journal-title":"arXiv preprint"},{"key":"ref8","article-title":"Probabilistic movement primitives","volume-title":"Advances in Neural Information Processing Systems","volume":"26","author":"Paraschos","year":"2013"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/s10514-017-9648-7"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2019.2937010"},{"key":"ref11","first-page":"1856","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proceedings of International Conference on Machine Learning (ICML)","volume":"80","author":"Haarnoja"},{"key":"ref12","article-title":"Residual reinforcement learning from demonstrations","author":"Alakuijala","year":"2021","journal-title":"arXiv preprint"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1002\/aisy.202100095"},{"key":"ref14","first-page":"1587","article-title":"Addressing function approximation error in actor-critic methods","volume-title":"Proceedings of the 35th International Conference on Machine Learning","volume":"80","author":"Fujimoto"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/IROS40897.2019.8968201"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TSMCB.2009.2026289"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2007.11.026"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2020.3010739"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.3390\/app10196923"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9561619"},{"key":"ref21","article-title":"Goal-conditioned imitation learning","volume":"32","author":"Ding","year":"2019","journal-title":"Advances in neural information processing systems"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3073711"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA40945.2020.9197125"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/IROS45743.2020.9341390"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/IROS51168.2021.9636176"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/IROS51168.2021.9636417"},{"key":"ref27","first-page":"arXiv-2011","article-title":"Learning of long-horizon sparse-reward robotic manipulator tasks with base controllers","author":"Wang","year":"2020","journal-title":"arXiv e-prints"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3076971"},{"issue":"8","key":"ref29","volume-title":"Learning, improving, and generalizing motor skills for the peg-in-hole tasks based on imitation learning and self-learning","volume":"10","author":"Cho","year":"2020"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1007\/s10994-010-5223-6"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.2003.1242165"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1162\/NECO_a_00393"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TSMCB.2006.886952"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1177\/0278364919846363"},{"key":"ref35","article-title":"Imitationflow: Learning deep stable stochastic dynamic systems by normalizing flows","volume-title":"IEEE\/RSJ International Conference on Intelligent Robots and Systems","author":"Urain"},{"key":"ref36","first-page":"1057","article-title":"Policy gradient methods for reinforcement learning with function approximation","author":"Sutton","year":"1999","journal-title":"Advances in Neural Information Processing Systems (NIPS)"},{"key":"ref37","first-page":"179","article-title":"Off-policy actor-critic","volume-title":"Proceedings of the 29th International Coference on International Conference on Machine Learning, ser. ICML12","author":"Degris"},{"key":"ref38","article-title":"Auto-Encoding Variational Bayes","volume-title":"2nd International Conference on Learning Representations, ICLR 2014","author":"Kingma","year":"2014"},{"key":"ref39","volume-title":"Orientation probabilistic movement primitives on riemannian manifolds","volume":"abs\/2110.15036","author":"Rozo","year":"2021"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2019.2928760"},{"key":"ref41","volume-title":"Ubongo 3d","year":"2021"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-11008-0"},{"key":"ref43","article-title":"Adam: A method for stochastic optimization","volume-title":"conference paper at the 3rd International Conference for Learning Representations","author":"Kingma","year":"2014"},{"key":"ref44","author":"DEramo","year":"2021","journal-title":"Mushroomrl: Simplifying reinforcement learning research"},{"key":"ref45","volume-title":"Stanford Artificial Intelligence Laboratory"},{"key":"ref46","first-page":"11","article-title":"Orientation probabilistic movement primitives on riemannian manifolds","volume-title":"Conference on Robot Learning","volume":"5","author":"Rozo"}],"event":{"name":"2022 IEEE-RAS 21st International Conference on Humanoid Robots (Humanoids)","location":"Ginowan, Japan","start":{"date-parts":[[2022,11,28]]},"end":{"date-parts":[[2022,11,30]]}},"container-title":["2022 IEEE-RAS 21st International Conference on Humanoid Robots (Humanoids)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9999736\/9999739\/10000148.pdf?arnumber=10000148","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,14]],"date-time":"2024-03-14T04:51:45Z","timestamp":1710391905000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10000148\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,11,28]]},"references-count":46,"URL":"https:\/\/doi.org\/10.1109\/humanoids53995.2022.10000148","relation":{},"subject":[],"published":{"date-parts":[[2022,11,28]]}}}