{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,26]],"date-time":"2026-03-26T20:59:50Z","timestamp":1774558790305,"version":"3.50.1"},"reference-count":76,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"Joint Funds of the National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U24A20128"],"award-info":[{"award-number":["U24A20128"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"Zhejiang Provincial Natural Science Foundation of China","doi-asserted-by":"publisher","award":["LD25F030001"],"award-info":[{"award-number":["LD25F030001"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Automat. Sci. Eng."],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/tase.2025.3606549","type":"journal-article","created":{"date-parts":[[2025,9,5]],"date-time":"2025-09-05T18:27:51Z","timestamp":1757096871000},"page":"21256-21268","source":"Crossref","is-referenced-by-count":1,"title":["Efficient Alignment of Unconditioned Action Prior for Language-Conditioned Pick and Place in Clutter"],"prefix":"10.1109","volume":"22","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3632-917X","authenticated-orcid":false,"given":"Kechun","family":"Xu","sequence":"first","affiliation":[{"name":"Zhejiang University, Hangzhou, China"}]},{"given":"Xunlong","family":"Xia","sequence":"additional","affiliation":[{"name":"Alibaba Cloud, Hangzhou, China"}]},{"given":"Kaixuan","family":"Wang","sequence":"additional","affiliation":[{"name":"Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-3460-8394","authenticated-orcid":false,"given":"Yifei","family":"Yang","sequence":"additional","affiliation":[{"name":"Zhejiang University, Hangzhou, China"}]},{"given":"Yunxuan","family":"Mao","sequence":"additional","affiliation":[{"name":"Zhejiang University, Hangzhou, China"}]},{"given":"Bing","family":"Deng","sequence":"additional","affiliation":[{"name":"Alibaba Cloud, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8662-5818","authenticated-orcid":false,"given":"Jieping","family":"Ye","sequence":"additional","affiliation":[{"name":"Alibaba Cloud, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9318-9014","authenticated-orcid":false,"given":"Rong","family":"Xiong","sequence":"additional","affiliation":[{"name":"Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0981-935X","authenticated-orcid":false,"given":"Yue","family":"Wang","sequence":"additional","affiliation":[{"name":"Zhejiang University, Hangzhou, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3092640"},{"key":"ref2","first-page":"726","article-title":"Transporter networks: Rearranging the visual world for robotic manipulation","volume-title":"Proc. Conf. Robot Learn.","author":"Zeng"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2021.XVII.072"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01437"},{"key":"ref5","first-page":"1001","article-title":"Selective object rearrangement in clutter","volume-title":"Proc. Conf. Robot Learn.","author":"Tang"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2024.3502520"},{"key":"ref7","first-page":"894","article-title":"CLIPort: What and where pathways for robotic manipulation","volume-title":"Proc. Conf. Robot Learn.","author":"Shridhar"},{"key":"ref8","first-page":"785","article-title":"Perceiver-actor: A multi-task transformer for robotic manipulation","volume-title":"Proc. 6th Annu. Conf. Robot Learn.","author":"Shridhar"},{"key":"ref9","first-page":"21768","article-title":"VIMA: General robot manipulation with multimodal prompts","volume-title":"Proc. 40th Int. Conf. Mach. Learn.","author":"Jiang"},{"key":"ref10","first-page":"284","article-title":"GNFactor: Multi-task real robot learning with generalizable neural feature fields","volume-title":"Proc. Conf. Robot Learn.","author":"Ze"},{"key":"ref11","first-page":"3949","article-title":"Act3D: 3D feature field transformers for multi-task robotic manipulation","volume-title":"Proc. 7th Annu. Conf. Robot Learn.","author":"Gervet"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2024.XX.055"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA46639.2022.9811817"},{"key":"ref14","first-page":"1443","article-title":"GraspSplats: Efficient manipulation with 3D feature splatting","volume-title":"Proc. 8th Annu. Conf. Robot Learn.","author":"Ji"},{"key":"ref15","first-page":"4748","article-title":"Splat-MOVER: Multi-stage, open-vocabulary robotic manipulation via editable Gaussian splatting","volume-title":"Proc. 8th Annu. Conf. Robot Learn.","author":"Shorinwa"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2024.3432348"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2023.xix.066"},{"key":"ref18","first-page":"272","article-title":"D3Fields: Dynamic 3D descriptor fields for zero-shot generalizable rearrangement","volume-title":"Proc. 8th Annu. Conf. Robot Learn.","author":"Wang"},{"key":"ref19","first-page":"178","article-title":"Language embedded radiance fields for zero-shot task-oriented grasping","volume-title":"Proc. 7th Annu. Conf. Robot Learn.","author":"Rashid"},{"key":"ref20","first-page":"287","article-title":"Do as I can, not as I say: Grounding language in robotic affordances","volume-title":"Proc. Conf. Robot Learn.","author":"Ahn"},{"key":"ref21","first-page":"540","article-title":"VoxPoser: Composable 3D value maps for robotic manipulation with language models","volume-title":"Proc. Conf. Robot Learn.","author":"Huang"},{"key":"ref22","first-page":"3568","article-title":"ThinkGrasp: A vision-language system for strategic part grasping in clutter","volume-title":"Proc. 8th Annu. Conf. Robot Learn.","author":"Qian"},{"key":"ref23","article-title":"Training a helpful and harmless assistant with reinforcement learning from human feedback","author":"Bai","year":"2022","journal-title":"arXiv:2204.05862"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01146"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19815-1_40"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2016.7487583"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA46639.2022.9811547"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2022.3191596"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10160889"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1177\/0278364919868017"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA40945.2020.9197318"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8461041"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/COASE.2018.8560406"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/IROS45743.2020.9341545"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2020.2970622"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10160779"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA57147.2024.10610792"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA57147.2024.10611277"},{"key":"ref39","article-title":"Learning efficient and robust language-conditioned manipulation using textual-visual relevancy and equivariant language mapping","author":"Jia","year":"2024","journal-title":"arXiv:2406.15677"},{"key":"ref40","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"139","author":"Radford"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"ref42","first-page":"1","article-title":"DINOv2: Learning robust visual features without supervision","volume":"2023","author":"Oquab","year":"2023","journal-title":"Trans. Mach. Learn. Res. J."},{"key":"ref43","volume-title":"GPT-4V(ision) System Card","year":"2023"},{"key":"ref44","article-title":"Instruct2Act: Mapping multi-modality instructions to robotic actions with large language model","author":"Huang","year":"2023","journal-title":"arXiv:2305.11176"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10161041"},{"key":"ref46","first-page":"1199","article-title":"VIOLA: Imitation learning for vision-based manipulation with object proposal priors","volume-title":"Proc. Conf. Robot Learn.","author":"Zhu"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2024.3353484"},{"key":"ref48","first-page":"3619","article-title":"M2T2: Multi-task masked transformer for object-centric pick and place","volume-title":"Proc. 7th Annu. Conf. Robot Learn.","author":"Yuan"},{"key":"ref49","first-page":"31489","article-title":"SNeRL: Semantic-aware neural radiance fields for reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Shim"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2021.XVII.024"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA46639.2022.9812146"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2023.xix.074"},{"key":"ref53","first-page":"1949","article-title":"3D diffuser actor: Policy diffusion with 3D scene representations","volume-title":"Proc. 8th Annu. Conf. Robot Learn.","author":"Ke"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2024.3511401"},{"key":"ref55","first-page":"405","article-title":"Distilled feature fields enable few-shot language-guided manipulation","volume-title":"Proc. Conf. Robot Learn.","author":"Shen"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10160591"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2024.3387941"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2024.3477090"},{"key":"ref59","article-title":"Look before you leap: Unveiling the power of GPT-4V in robotic vision-language planning","volume-title":"Proc. 1st Workshop Vis.-Lang. Models Navigat. Manipulation ICRA","author":"Hu"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2023.3281153"},{"key":"ref61","first-page":"6892","article-title":"Open X-embodiment: Robotic learning datasets and RT-X models","volume-title":"Proc. IEEE Int. Conf. Robot. Autom.","author":"O\u2019Neill"},{"key":"ref62","article-title":"Octo: An open-source generalist robot policy","author":"Model Team","year":"2024","journal-title":"arXiv:2405.12213"},{"key":"ref63","first-page":"2679","article-title":"OpenVLA: An open-source vision-language-action model","volume-title":"Proc. 8th Annu. Conf. Robot Learn.","author":"Kim"},{"key":"ref64","article-title":"RDT-1B: A diffusion foundation model for bimanual manipulation","author":"Liu","year":"2024","journal-title":"arXiv:2410.07864"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2025.xxi.010"},{"key":"ref66","first-page":"1975","article-title":"HomeRobot: Open-vocabulary mobile manipulation","volume-title":"Proc. Conf. Robot Learn.","author":"Yenamandra"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58452-8_24"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2023.127063"},{"key":"ref70","volume-title":"PyBullet, a Python Module for Physics Simulation for Games, Robotics and Machine Learning","author":"Coumans","year":"2016"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1145\/3592433"},{"key":"ref72","volume-title":"OpenAI. GPT-4O: OpenAI\u2019s Multimodal Vision-Language System","year":"2023"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/ICARM58088.2023.10218865"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1812.05905"},{"key":"ref75","article-title":"Soft actor-critic for discrete action settings","author":"Christodoulou","year":"2019","journal-title":"arXiv:1910.07207"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2023.3242169"}],"container-title":["IEEE Transactions on Automation Science and Engineering"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/8856\/10839176\/11152358.pdf?arnumber=11152358","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,26]],"date-time":"2026-03-26T19:53:17Z","timestamp":1774554797000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11152358\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":76,"URL":"https:\/\/doi.org\/10.1109\/tase.2025.3606549","relation":{},"ISSN":["1545-5955","1558-3783"],"issn-type":[{"value":"1545-5955","type":"print"},{"value":"1558-3783","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]}}}