{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T16:57:46Z","timestamp":1777654666642,"version":"3.51.4"},"reference-count":50,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,5,19]],"date-time":"2025-05-19T00:00:00Z","timestamp":1747612800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,5,19]],"date-time":"2025-05-19T00:00:00Z","timestamp":1747612800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,5,19]]},"DOI":"10.1109\/icra55743.2025.11128690","type":"proceedings-article","created":{"date-parts":[[2025,9,2]],"date-time":"2025-09-02T17:28:56Z","timestamp":1756834136000},"page":"3344-3351","source":"Crossref","is-referenced-by-count":3,"title":["Bridging the Human to Robot Dexterity Gap Through Object-Oriented Rewards"],"prefix":"10.1109","author":[{"given":"Irmak","family":"Guzey","sequence":"first","affiliation":[{"name":"New York University"}]},{"given":"Yinlong","family":"Dai","sequence":"additional","affiliation":[{"name":"New York University"}]},{"given":"Georgy","family":"Savva","sequence":"additional","affiliation":[{"name":"New York University"}]},{"given":"Raunaq","family":"Bhirangi","sequence":"additional","affiliation":[{"name":"New York University"}]},{"given":"Lerrel","family":"Pinto","sequence":"additional","affiliation":[{"name":"New York University"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2016.7487156"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2023.xix.016"},{"key":"ref3","article-title":"Behavior Generation with Latent Actions","author":"Lee","year":"2024","journal-title":"arXiv e-prints"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1177\/02783649241273668"},{"key":"ref5","article-title":"From play to policy: Conditional behavior generation from uncurated robot data","author":"Cui","year":"2022","journal-title":"arXiv preprint"},{"key":"ref6","article-title":"RT-2: Vision-Language-Action Models Transfer Web Knowledge to Robotic Control","author":"Brohan","year":"2023","journal-title":"arXiv e-prints"},{"key":"ref7","article-title":"Open X-Embodiment: Robotic Learning Datasets and RT-X Models","author":"O\u2019Neill","year":"2023","journal-title":"arXiv e-prints"},{"key":"ref8","article-title":"Robot Utility Models: General Policies for ZeroShot Deployment in New Environments","author":"Etukuru","year":"2024","journal-title":"arXiv e-prints"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/icra48891.2023.10160547"},{"key":"ref10","article-title":"OPEN TEACH: A Versatile Teleoperation System for Robotic Manipulation","author":"Iyer","year":"2024","journal-title":"arXiv e-prints"},{"key":"ref11","article-title":"Bunny-VisionPro: Real-Time Bimanual Dexterous Teleoperation for Imitation Learning","author":"Ding","year":"2024","journal-title":"arXiv e-prints"},{"key":"ref12","article-title":"Graph inverse reinforcement learning from diverse videos","author":"Kumar","year":"2022","journal-title":"arXiv preprint"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2020.xvi.024"},{"key":"ref14","article-title":"Learning by Watching: A Review of Video-based Learning Approaches for Robot Manipulation","author":"Eze","year":"2024","journal-title":"arXiv e-prints"},{"key":"ref15","article-title":"MimicPlay: Long-Horizon Imitation Learning by Watching Human Play","author":"Wang","year":"2023","journal-title":"arXiv e-prints"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2024.XX.043"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/icra48891.2023.10160216"},{"key":"ref18","article-title":"Solving Rubik\u2019s Cube with a Robot Hand","author":"Akkaya","year":"2019","journal-title":"arXiv e-prints"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2023.XIX.089"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2023.XIX.036"},{"key":"ref21","article-title":"Eureka: Human-Level Reward Design via Coding Large Language Models","author":"Ma","year":"2023","journal-title":"arXiv e-prints"},{"key":"ref22","article-title":"Holodex: Teaching dexterity with immersive mixed reality","author":"Arunachalam","year":"2022","journal-title":"arXiv preprint"},{"key":"ref23","article-title":"ACE: A Cross-Platform VisualExoskeletons System for Low-Cost Dexterous Teleoperation","author":"Yang","year":"2024","journal-title":"arXiv e-prints"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA40945.2020.9197124"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2022.xviii.010"},{"key":"ref26","author":"Guzey","year":"2023","journal-title":"Dexterity from touch: Self-supervised pre-training of tactile representations with robotic play"},{"key":"ref27","article-title":"Watch and match: Supercharging imitation with regularized optimal transport","author":"Haldar","year":"2022","journal-title":"arXiv preprint"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/icra57147.2024.10611407"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2023.XIX.009"},{"key":"ref30","article-title":"Dreamitate: Real-World Visuomotor Policy Learning via Video Generation","author":"Liang","year":"2024","journal-title":"arXiv e-prints"},{"key":"ref31","article-title":"Learning interactive real-world simulators","author":"Yang","year":"2023","journal-title":"arXiv preprint"},{"key":"ref32","article-title":"Cross-Domain Transfer via Semantic Skill Imitation","author":"Pertsch","year":"2022","journal-title":"arXiv e-prints"},{"key":"ref33","article-title":"Ego-Exo4D: Understanding Skilled Human Activity from First- and ThirdPerson Perspectives","author":"Grauman","year":"2023","journal-title":"arXiv e-prints"},{"key":"ref34","article-title":"Deep Generative Models in Robotics: A Survey on Learning from Multimodal Demonstrations","author":"Urain","year":"2024","journal-title":"arXiv e-prints"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01324"},{"key":"ref36","article-title":"Arcap: Collecting high-quality human demonstrations for robot learning with augmented reality feedback","author":"Chen","year":"2024","journal-title":"arXiv preprint"},{"key":"ref37","article-title":"Okami: Teaching humanoid robots manipulation skills through single video imitation","author":"Li","year":"2024","journal-title":"arXiv preprint"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2017.167"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1017\/cbo9780511811685"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/3DV.2019.00043"},{"key":"ref41","first-page":"32","article-title":"Watch and match: Supercharging imitation with regularized optimal transport","volume-title":"Conference on Robot Learning.","author":"Haldar","year":"2023"},{"key":"ref42","volume-title":"Lang-segment-anything","author":"Medeiros","year":"2023"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73033-7_2"},{"key":"ref46","doi-asserted-by":"crossref","first-page":"823","DOI":"10.1103\/PhysRev.36.823","article-title":"On the theory of the brownian motion","volume":"36","author":"Uhlenbeck","year":"1930","journal-title":"Phys. Rev."},{"key":"ref47","article-title":"Continuous control with deep reinforcement learning","author":"Lillicrap","year":"2015","journal-title":"arXiv preprint"},{"key":"ref48","article-title":"Mastering visual continuous control: Improved data-augmented reinforcement learning","author":"Yarats","year":"2021","journal-title":"arXiv preprint"},{"key":"ref49","first-page":"652","article-title":"Pointnet: Deep learning on point sets for 3d classification and segmentation","volume-title":"Proceedings of the IEEE conference on computer vision and pattern recognition","author":"Qi","year":"2017"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"}],"event":{"name":"2025 IEEE International Conference on Robotics and Automation (ICRA)","location":"Atlanta, GA, USA","start":{"date-parts":[[2025,5,19]]},"end":{"date-parts":[[2025,5,23]]}},"container-title":["2025 IEEE International Conference on Robotics and Automation (ICRA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11127273\/11127223\/11128690.pdf?arnumber=11128690","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,3]],"date-time":"2025-09-03T06:15:01Z","timestamp":1756880101000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11128690\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5,19]]},"references-count":50,"URL":"https:\/\/doi.org\/10.1109\/icra55743.2025.11128690","relation":{},"subject":[],"published":{"date-parts":[[2025,5,19]]}}}