{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,5,13]],"date-time":"2025-05-13T06:40:11Z","timestamp":1747118411230,"version":"3.40.5"},"reference-count":35,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"6","license":[{"start":{"date-parts":[[2025,6,1]],"date-time":"2025-06-01T00:00:00Z","timestamp":1748736000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,6,1]],"date-time":"2025-06-01T00:00:00Z","timestamp":1748736000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,1]],"date-time":"2025-06-01T00:00:00Z","timestamp":1748736000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. Lett."],"published-print":{"date-parts":[[2025,6]]},"DOI":"10.1109\/lra.2025.3564701","type":"journal-article","created":{"date-parts":[[2025,4,28]],"date-time":"2025-04-28T17:34:55Z","timestamp":1745861695000},"page":"6248-6255","source":"Crossref","is-referenced-by-count":0,"title":["Should We Learn Contact-Rich Manipulation Policies From Sampling-Based Planners?"],"prefix":"10.1109","volume":"10","author":[{"ORCID":"https:\/\/orcid.org\/0009-0000-5986-9614","authenticated-orcid":false,"given":"Huaijiang","family":"Zhu","sequence":"first","affiliation":[{"name":"New York University, New York, NY, USA"}]},{"given":"Tong","family":"Zhao","sequence":"additional","affiliation":[{"name":"Boston Dynamics AI Institute, Cambridge, MA, USA"}]},{"given":"Xinpei","family":"Ni","sequence":"additional","affiliation":[{"name":"Boston Dynamics AI Institute, Cambridge, MA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6881-9918","authenticated-orcid":false,"given":"Jiuguang","family":"Wang","sequence":"additional","affiliation":[{"name":"Boston Dynamics AI Institute, Cambridge, MA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7620-3117","authenticated-orcid":false,"given":"Kuan","family":"Fang","sequence":"additional","affiliation":[{"name":"Boston Dynamics AI Institute, Cambridge, MA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6458-9112","authenticated-orcid":false,"given":"Ludovic","family":"Righetti","sequence":"additional","affiliation":[{"name":"New York University, New York, NY, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7569-4535","authenticated-orcid":false,"given":"Tao","family":"Pang","sequence":"additional","affiliation":[{"name":"Boston Dynamics AI Institute, Cambridge, MA, USA"}]}],"member":"263","reference":[{"key":"ref1","first-page":"991","article-title":"BC-Z: Zero-shot task generalization with robotic imitation learning","volume-title":"Proc. Conf. Robot Learn.","author":"Jang","year":"2022"},{"key":"ref2","first-page":"894","article-title":"CLIPort: What and where pathways for robotic manipulation","volume-title":"Proc. Conf. Robot Learn.","author":"Shridhar","year":"2022"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1177\/02783649241273668"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2023.XIX.016"},{"key":"ref5","first-page":"1910","article-title":"ALOHA unleashed: A simple recipe for robot dexterity","volume-title":"Proc. Conf. Robot Learn.","author":"Zhao","year":"2025"},{"key":"ref6","first-page":"6840","article-title":"Denoising diffusion probabilistic models","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Ho","year":"2020"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2023.XIX.028"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2024.XX.045"},{"article-title":"Scaling laws for neural language models","year":"2020","author":"Kaplan","key":"ref9"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01494"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.abk2822"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.adc9244"},{"key":"ref13","first-page":"80375","article-title":"Data quality in imitation learning","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Belkhale","year":"2024"},{"key":"ref14","first-page":"1722","article-title":"In-hand object rotation via rapid motor adaptation","volume-title":"Proc. Conf. Robot Learn.","author":"Qi","year":"2023"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2023.xix.036"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10160216"},{"article-title":"Neural MP: A generalist neural motion planner","year":"2024","author":"Dalal","key":"ref17"},{"key":"ref18","first-page":"1218","article-title":"Learning locomotion skills from MPC in sensor space","volume-title":"Proc. Learn. Dyn. Control Conf.","author":"Khadiv","year":"2023"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/IROS51168.2021.9636346"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2023.3333699"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2023.3300230"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/IROS55552.2023.10341813"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2024.XX.132"},{"article-title":"Dexterous contact-rich manipulation via the contact trust region","year":"2025","author":"Suh","key":"ref24"},{"article-title":"Dojo: A differentiable physics engine for robotics","year":"2022","author":"Howell","key":"ref25"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3146931"},{"key":"ref27","first-page":"20668","article-title":"Do differentiable simulators give better policy gradients?","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Suh","year":"2022"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9560941"},{"key":"ref29","article-title":"Goal-conditioned imitation learning","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Ding","year":"2019"},{"key":"ref30","first-page":"1025","article-title":"Relay policy learning: Solving long-horizon tasks via imitation and reinforcement learning","volume-title":"Proc. Conf. Robot Learn.","volume":"100","author":"Gupta","year":"2020"},{"key":"ref31","article-title":"Learning to reach goals via iterated supervised learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Ghosh","year":"2021"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11671"},{"key":"ref34","article-title":"Decoupled weight decay regularization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Loshchilov","year":"2017"},{"key":"ref35","first-page":"627","article-title":"A reduction of imitation learning and structured prediction to no-regret online learning","volume-title":"Proc. 14th Int. Conf. Artif. Intell. Statist. Workshop Conf. Proc.","author":"Ross","year":"2011"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/7083369\/10969146\/10977833.pdf?arnumber=10977833","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,5,13]],"date-time":"2025-05-13T06:10:29Z","timestamp":1747116629000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10977833\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6]]},"references-count":35,"journal-issue":{"issue":"6"},"URL":"https:\/\/doi.org\/10.1109\/lra.2025.3564701","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"type":"electronic","value":"2377-3766"},{"type":"electronic","value":"2377-3774"}],"subject":[],"published":{"date-parts":[[2025,6]]}}}