{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,19]],"date-time":"2025-12-19T10:14:28Z","timestamp":1766139268927,"version":"3.44.0"},"reference-count":44,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,5,19]],"date-time":"2025-05-19T00:00:00Z","timestamp":1747612800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,5,19]],"date-time":"2025-05-19T00:00:00Z","timestamp":1747612800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62302143"],"award-info":[{"award-number":["62302143"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003995","name":"Anhui Provincial Natural Science Foundation","doi-asserted-by":"publisher","award":["2308085QF207"],"award-info":[{"award-number":["2308085QF207"]}],"id":[{"id":"10.13039\/501100003995","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Project of China","doi-asserted-by":"publisher","award":["2022ZD0160102"],"award-info":[{"award-number":["2022ZD0160102"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,5,19]]},"DOI":"10.1109\/icra55743.2025.11127736","type":"proceedings-article","created":{"date-parts":[[2025,9,2]],"date-time":"2025-09-02T17:28:56Z","timestamp":1756834136000},"page":"8980-8987","source":"Crossref","is-referenced-by-count":2,"title":["UniAff: A Unified Representation of Affordances for Tool Usage and Articulation with Vision-Language Models"],"prefix":"10.1109","author":[{"given":"Qiaojun","family":"Yu","sequence":"first","affiliation":[{"name":"Shanghai Jiao Tong University,China"}]},{"given":"Siyuan","family":"Huang","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University,China"}]},{"given":"Xibin","family":"Yuan","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University,China"}]},{"given":"Zhengkai","family":"Jiang","sequence":"additional","affiliation":[{"name":"Hong Kong University of Science and Technology,HongKong"}]},{"given":"Ce","family":"Hao","sequence":"additional","affiliation":[{"name":"National University of Singapore,Singapore"}]},{"given":"Xin","family":"Li","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University,China"}]},{"given":"Haonan","family":"Chang","sequence":"additional","affiliation":[{"name":"Rutgers University,United States of America"}]},{"given":"Junbo","family":"Wang","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University,China"}]},{"given":"Liu","family":"Liu","sequence":"additional","affiliation":[{"name":"Hefei University of Technology,China"}]},{"given":"Hongsheng","family":"Li","sequence":"additional","affiliation":[{"name":"CUHK-MMLab,China"}]},{"given":"Peng","family":"Gao","sequence":"additional","affiliation":[{"name":"Shanghai AI Lab,China"}]},{"given":"Cewu","family":"Lu","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong 
University,China"}]}],"member":"263","reference":[{"doi-asserted-by":"publisher","key":"ref1","DOI":"10.1126\/science.aaz7597"},{"doi-asserted-by":"publisher","key":"ref2","DOI":"10.15607\/rss.2024.xx.016"},{"doi-asserted-by":"publisher","key":"ref3","DOI":"10.1109\/CVPR52729.2023.00684"},{"doi-asserted-by":"publisher","key":"ref4","DOI":"10.1109\/ICRA57147.2024.10610652"},{"key":"ref5","article-title":"Learning precise affordances from egocentric videos for robotic manipulation","author":"Li","year":"2024","journal-title":"arXiv preprint"},{"doi-asserted-by":"publisher","key":"ref6","DOI":"10.1109\/IROS58592.2024.10801352"},{"key":"ref7","article-title":"A3vlm: Actionable articulation-aware vision language model","author":"Huang","year":"2024","journal-title":"arXiv preprint"},{"doi-asserted-by":"publisher","key":"ref8","DOI":"10.1109\/CVPR52733.2024.01710"},{"doi-asserted-by":"publisher","key":"ref9","DOI":"10.1109\/IROS58592.2024.10801993"},{"doi-asserted-by":"publisher","key":"ref10","DOI":"10.1109\/IROS55552.2023.10341672"},{"doi-asserted-by":"publisher","key":"ref11","DOI":"10.1109\/ICRA.2015.7139369"},{"doi-asserted-by":"publisher","key":"ref12","DOI":"10.1109\/ICRA57147.2024.10610744"},{"doi-asserted-by":"publisher","key":"ref13","DOI":"10.1109\/LRA.2024.3381482"},{"key":"ref14","article-title":"Sphinx: The joint mixing of weights, tasks, and visual embeddings for multi-modal large language models","author":"Lin","year":"2023","journal-title":"arXiv preprint"},{"doi-asserted-by":"publisher","key":"ref15","DOI":"10.1109\/CVPR52733.2024.00915"},{"doi-asserted-by":"publisher","key":"ref16","DOI":"10.1109\/ICRA57147.2024.10610090"},{"doi-asserted-by":"publisher","key":"ref17","DOI":"10.1007\/978-3-031-73226-3_12"},{"volume-title":"Pace: Pose annotations in cluttered environments","year":"2024","author":"You","key":"ref18"},{"doi-asserted-by":"publisher","key":"ref19","DOI":"10.1109\/CVPR52729.2023.00084"},{"doi-asserted-by":"publisher","key":"ref20","DOI":"10.1109\/CVPR42600.2020.01111"},{"key":"ref21","first-page":"894","article-title":"Cliport: What and where pathways for robotic manipulation","volume-title":"Conference on robot learning","author":"Shridhar"},{"key":"ref22","first-page":"665","article-title":"Vlmbench: A compositional benchmark for vision-and-language manipulation","volume":"35","author":"Zheng","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref23","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"International conference on machine learning","author":"Radford"},{"key":"ref24","first-page":"175","article-title":"Instruction-driven history-aware policies for robotic manipulations","volume-title":"Conference on Robot Learning","author":"Guhur"},{"key":"ref25","first-page":"785","article-title":"Perceiver-actor: A multi-task transformer for robotic manipulation","volume-title":"Conference on Robot Learning","author":"Shridhar"},{"doi-asserted-by":"publisher","key":"ref26","DOI":"10.1007\/s10514-023-10139-z"},{"key":"ref27","article-title":"Bridging zero-shot object navigation and foundation models through pixel-guided navigation skill","author":"Cai","year":"2023","journal-title":"arXiv preprint"},{"key":"ref28","article-title":"Lgmcts: Language-guided monte-carlo tree search for executable semantic object rearrangement","author":"Chang","year":"2023","journal-title":"arXiv 
preprint"},{"doi-asserted-by":"publisher","key":"ref29","DOI":"10.1109\/ICRA48891.2023.10160591"},{"key":"ref30","article-title":"Voxposer: Composable 3d value maps for robotic manipulation with language models","author":"Huang","year":"2023","journal-title":"arXiv preprint"},{"doi-asserted-by":"publisher","key":"ref31","DOI":"10.1109\/IROS58592.2024.10801816"},{"key":"ref32","article-title":"Robonurse-vla: Robotic scrub nurse system based on vision-language-action model","author":"Li","year":"2024","journal-title":"arXiv preprint"},{"key":"ref33","article-title":"Open x-embodiment: Robotic learning datasets and rt-x models","author":"Padalkar","year":"2023","journal-title":"arXiv preprint"},{"volume-title":"8th Annual Conference on Robot Learning","author":"Xiong","article-title":"Autonomous interactive correction mllm for robust robotic manipulation","key":"ref34"},{"key":"ref35","article-title":"Visual instruction tuning","volume":"36","author":"Liu","year":"2024","journal-title":"Advances in neural information processing systems"},{"key":"ref36","article-title":"Sphinx-x: Scaling data and parameters for a family of multi-modal large language models","author":"Gao","year":"2024","journal-title":"arXiv preprint"},{"key":"ref37","article-title":"Llama 2: Open foundation and fine-tuned chat models","author":"Touvron","year":"2023","journal-title":"arXiv preprint"},{"key":"ref38","article-title":"Dinov2: Learning robust visual features without supervision","author":"Oquab","year":"2023","journal-title":"arXiv preprint"},{"key":"ref39","first-page":"19730","article-title":"Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"International conference on machine learning","author":"Li"},{"doi-asserted-by":"publisher","key":"ref40","DOI":"10.1109\/ICCV51070.2023.00371"},{"doi-asserted-by":"publisher","key":"ref41","DOI":"10.1109\/ICCV48922.2021.00674"},{"key":"ref42","article-title":"Vat-mart: Learning visual action trajectory proposals for manipulating 3d articulated objects","author":"Wu","year":"2021","journal-title":"arXiv preprint"},{"doi-asserted-by":"publisher","key":"ref43","DOI":"10.1109\/LRA.2022.3142397"},{"doi-asserted-by":"publisher","key":"ref44","DOI":"10.1109\/CVPR42600.2020.01146"}],"event":{"name":"2025 IEEE International Conference on Robotics and Automation (ICRA)","start":{"date-parts":[[2025,5,19]]},"location":"Atlanta, GA, USA","end":{"date-parts":[[2025,5,23]]}},"container-title":["2025 IEEE International Conference on Robotics and Automation (ICRA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11127273\/11127223\/11127736.pdf?arnumber=11127736","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,3]],"date-time":"2025-09-03T06:23:30Z","timestamp":1756880610000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11127736\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5,19]]},"references-count":44,"URL":"https:\/\/doi.org\/10.1109\/icra55743.2025.11127736","relation":{},"subject":[],"published":{"date-parts":[[2025,5,19]]}}}