{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T01:39:58Z","timestamp":1740101998461,"version":"3.37.3"},"reference-count":32,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,6,5]],"date-time":"2023-06-05T00:00:00Z","timestamp":1685923200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,6,5]],"date-time":"2023-06-05T00:00:00Z","timestamp":1685923200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100010663","name":"ERC-StG Ergo-Lean","doi-asserted-by":"publisher","award":["850932"],"award-info":[{"award-number":["850932"]}],"id":[{"id":"10.13039\/100010663","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,6,5]]},"DOI":"10.1109\/arso56563.2023.10187436","type":"proceedings-article","created":{"date-parts":[[2023,7,25]],"date-time":"2023-07-25T17:22:10Z","timestamp":1690305730000},"page":"140-146","source":"Crossref","is-referenced-by-count":4,"title":["An Open Tele-Impedance Framework to Generate Data for Contact-Rich Tasks in Robotic Manipulation"],"prefix":"10.1109","author":[{"given":"Alberto","family":"Giammarino","sequence":"first","affiliation":[{"name":"Istituto Italiano di Tecnologia,HRI2 Lab,Genoa,Italy"}]},{"given":"Juan M.","family":"Gandarias","sequence":"additional","affiliation":[{"name":"Istituto Italiano di Tecnologia,HRI2 Lab,Genoa,Italy"}]},{"given":"Arash","family":"Ajoudani","sequence":"additional","affiliation":[{"name":"Istituto Italiano di Tecnologia,HRI2 Lab,Genoa,Italy"}]}],"member":"263","reference":[{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/IROS40897.2019.8968201"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1177\/0278364912464668"},{"key":"ref15","first-page":"1179","article-title":"Conservative q-learning for offline reinforcement learning","volume":"33","author":"kumar","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref14","article-title":"Offline reinforcement learning: Tutorial, review, and perspectives on open problems","author":"levine","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.1907856118"},{"key":"ref30","article-title":"What matters in learning from offline human demonstrations for robot manipulation","author":"mandlekar","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref11","article-title":"Keep doing what worked: Behavioral modelling priors for offline reinforcement learning","author":"siegel","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref10","article-title":"Cog: Connecting new skills to past experience with offline reinforcement learning","author":"singh","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref32","article-title":"Open-vico: An open-source gazebo toolkit for multi-camera-based skeleton tracking in human-robot collaboration","author":"fortini","year":"2022","journal-title":"ArXiv Preprint"},{"key":"ref2","first-page":"103","article-title":"A framework for behavioural cloning","volume":"15","author":"bain","year":"1995","journal-title":"Machine Intelligence"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/3054912"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/HUMANOIDS.2015.7363441"},{"key":"ref16","article-title":"Maximum a posteriori policy optimisation","author":"abdolmaleki","year":"2018","journal-title":"ArXiv Preprint"},{"key":"ref19","article-title":"Accelerating online reinforcement learning with offline datasets","author":"nair","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"key":"ref24","article-title":"Leveraging demonstrations for deep reinforcement learning on robotics problems with sparse rewards","author":"vecerik","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8463162"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1145\/3126594.3126664"},{"key":"ref25","first-page":"879","article-title":"Roboturk: A crowdsourcing platform for robotic skill learning through imitation","author":"mandlekar","year":"0","journal-title":"Conference on Robot Learning"},{"key":"ref20","article-title":"D4rl: Datasets for deep data-driven reinforcement learning","author":"fu","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8461249"},{"key":"ref21","article-title":"Learning complex dexterous manipulation with deep reinforcement learning and demonstrations","author":"rajeswaran","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2012.6224904"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2006.252"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.3390\/s20185357"},{"key":"ref8","article-title":"Guided reinforcement learning with learned skills","author":"pertsch","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref7","article-title":"Accelerating reinforcement learning with learned skill priors","author":"pertsch","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref9","article-title":"Relay policy learning: Solving long-horizon tasks via imitation and reinforcement learning","author":"gupta","year":"2019","journal-title":"ArXiv Preprint"},{"key":"ref4","first-page":"2","article-title":"Algorithms for inverse reinforcement learning","volume":"1","author":"ng","year":"2000","journal-title":"ICML"},{"key":"ref3","first-page":"627","article-title":"A reduction of imitation learning and structured prediction to no-regret online learning","author":"ross","year":"0","journal-title":"Proceedings of the fourteenth international conference on artificial intelligence and statistics JMLR Workshop and Conference Proceedings"},{"key":"ref6","article-title":"Parrot: Data-driven behavioral priors for reinforcement learning","author":"singh","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref5","first-page":"278","article-title":"Policy invariance under reward transformations: Theory and application to reward shaping","volume":"99","author":"ng","year":"1999","journal-title":"ICML"}],"event":{"name":"2023 IEEE International Conference on Advanced Robotics and Its Social Impacts (ARSO)","start":{"date-parts":[[2023,6,5]]},"location":"Berlin, Germany","end":{"date-parts":[[2023,6,7]]}},"container-title":["2023 IEEE International Conference on Advanced Robotics and Its Social Impacts (ARSO)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10187190\/10187410\/10187436.pdf?arnumber=10187436","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,8,14]],"date-time":"2023-08-14T17:36:54Z","timestamp":1692034614000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10187436\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,6,5]]},"references-count":32,"URL":"https:\/\/doi.org\/10.1109\/arso56563.2023.10187436","relation":{},"subject":[],"published":{"date-parts":[[2023,6,5]]}}}