{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,27]],"date-time":"2026-01-27T18:09:32Z","timestamp":1769537372239,"version":"3.49.0"},"reference-count":47,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000001","name":"National Science Foundation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,19]]},"DOI":"10.1109\/iros60139.2025.11247667","type":"proceedings-article","created":{"date-parts":[[2025,11,27]],"date-time":"2025-11-27T18:54:45Z","timestamp":1764269685000},"page":"4299-4306","source":"Crossref","is-referenced-by-count":0,"title":["PAVLM: Advancing Point Cloud based Affordance Understanding Via Vision-Language Model"],"prefix":"10.1109","author":[{"given":"Shang-Ching","family":"Liu","sequence":"first","affiliation":[{"name":"University of Hamburg,Technical Aspects of Multimodal Systems (TAMS),Department of Informatics"}]},{"given":"Van Nhiem","family":"Tran","sequence":"additional","affiliation":[{"name":"Hon Hai Research Institute (HHRI)"}]},{"given":"Wenkai","family":"Chen","sequence":"additional","affiliation":[{"name":"University of Hamburg,Technical Aspects of Multimodal Systems (TAMS),Department of Informatics"}]},{"given":"Wei-Lun","family":"Cheng","sequence":"additional","affiliation":[{"name":"National Taiwan University,Department of Electrical Engineering"}]},{"given":"Yen-Lin","family":"Huang","sequence":"additional","affiliation":[{"name":"National Tsinghua University,Department of Computer Science and Technology"}]},{"given":"I-Bin","family":"Liao","sequence":"additional","affiliation":[{"name":"Hon Hai Research Institute (HHRI)"}]},{"given":"Yung-Hui","family":"Li","sequence":"additional","affiliation":[{"name":"Hon Hai Research Institute (HHRI)"}]},{"given":"Jianwei","family":"Zhang","sequence":"additional","affiliation":[{"name":"University of Hamburg,Technical Aspects of Multimodal Systems (TAMS),Department of Informatics"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TII.2024.3393007"},{"key":"ref2","first-page":"14667","article-title":"AffordPose: A large-scale dataset of hand-object interactions with affordance-driven hand pose","volume-title":"IEEE\/CVF International Conference on Computer Vision (ICCV)","author":"Jian"},{"key":"ref3","article-title":"Visual memory for robust path following","volume-title":"Neural Information Processing Systems (NIPS)","author":"Kumar","year":"2018"},{"key":"ref4","doi-asserted-by":"crossref","DOI":"10.15607\/RSS.2022.XVIII.026","article-title":"Human-to-robot imitation in the wild","author":"Bahl","year":"2022"},{"key":"ref5","first-page":"10922","article-title":"Locate: Localize and transfer object parts for weakly supervised affordance grounding","volume-title":"IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","author":"Li"},{"key":"ref6","article-title":"Deep object pose estimation for semantic robotic 
grasping of household objects","author":"Tremblay","year":"2018"},{"key":"ref7","article-title":"Affordances in robotic tasks\u2013a survey","author":"Ard\u00f3n","year":"2020"},{"key":"ref8","article-title":"Visual instruction tuning","author":"Liu","year":"2023"},{"key":"ref9","article-title":"Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"International Conference on Machine Learning (ICML)","author":"Li"},{"key":"ref10","article-title":"Qwen-vl: A frontier large vision-language model with versatile abilities","author":"Bai","year":"2023"},{"key":"ref11","article-title":"Phi-3 technical report: A highly capable language model locally on your phone","author":"Abdin","year":"2024"},{"key":"ref12","article-title":"Pixtral-12b-2409: Vision-language model","year":"2024"},{"key":"ref13","doi-asserted-by":"crossref","DOI":"10.1109\/CVPRW63382.2024.00754","article-title":"AffordanceLLM: Grounding affordance from vision language models","author":"Qian","year":"2024"},{"key":"ref14","first-page":"5197","article-title":"Weakly supervised affordance detection","volume-title":"IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","author":"Sawatzky"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00836"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00249"},{"key":"ref17","doi-asserted-by":"crossref","DOI":"10.1109\/CVPR52729.2023.00120","article-title":"Ulip: Learning a unified representation of language, images, and point clouds for 3d understanding","author":"Xue","year":"2023"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02558"},{"key":"ref19","article-title":"3d-llm: Injecting the 3d world into large language models","author":"Hong","year":"2023"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1145\/3446370"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2016.7759429"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2019.2894439"},{"key":"ref23","doi-asserted-by":"crossref","DOI":"10.1109\/ICRA.2018.8460902","article-title":"AffordanceNet: An end-to-end deep learning approach for object affordance detection","author":"Do","year":"2018"},{"key":"ref24","article-title":"PartAfford: Part-level Affordance Discovery from 3D Objects","author":"Xu","year":"2022"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/IROS47612.2022.9981900"},{"key":"ref26","article-title":"PointNet++: Deep hierarchical feature learning on point sets in a metric space","author":"Qi","year":"2017"},{"key":"ref27","doi-asserted-by":"crossref","DOI":"10.1007\/978-3-031-20086-1_35","article-title":"Masked autoencoders for point cloud self-supervised learning","author":"Pang","year":"2022"},{"key":"ref28","article-title":"Towards compact 3D representations via point feature enhancement masked autoencoders","author":"Zha","year":"2023"},{"key":"ref29","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018"},{"key":"ref30","first-page":"19291","article-title":"Pointbert: Pre-training 3d point cloud transformers with masked point modeling","volume-title":"IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","author":"Yu"},{"key":"ref31","article-title":"Point-Bind & Point-LLM: Aligning point cloud with multi-modality for 3D understanding, generation, and instruction 
following","author":"Guo","year":"2023"},{"key":"ref32","doi-asserted-by":"crossref","DOI":"10.1109\/CVPR52729.2023.01457","article-title":"ImageBind: One embedding space to bind them all","author":"Girdhar","year":"2023"},{"key":"ref33","article-title":"ImageBind-LLM: Multi-modality instruction tuning","author":"Han","year":"2023"},{"key":"ref34","article-title":"LLaMA-Adapter: Efficient fine-tuning of language models with zero-init attention","author":"Zhang","year":"2023"},{"key":"ref35","article-title":"Visual instruction tuning","author":"Liu","year":"2023"},{"key":"ref36","article-title":"A call for embodied AI","author":"Paolo","year":"2024"},{"key":"ref37","article-title":"VoxPoser: Composable 3D value maps for robotic manipulation with language models","author":"Huang","year":"2023"},{"key":"ref38","doi-asserted-by":"crossref","DOI":"10.1109\/ICRA48891.2023.10160591","article-title":"Code as Policies: Language model programs for embodied control","author":"Liang","year":"2023"},{"key":"ref39","doi-asserted-by":"crossref","DOI":"10.1109\/IROS58592.2024.10801352","article-title":"CoPa: General robotic manipulation through spatial constraints of parts with foundation models","author":"Huang","year":"2024"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01351"},{"key":"ref41","article-title":"Llama-adapter: Efficient fine-tuning of language models with zero-init attention","author":"Zhang","year":"2023"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72670-5_20"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2018.09.001"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA57147.2024.10610008"},{"key":"ref45","volume-title":"Llama 3 model card","year":"2024"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00182"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW63382.2024.00754"}],"event":{"name":"2025 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","location":"Hangzhou, China","start":{"date-parts":[[2025,10,19]]},"end":{"date-parts":[[2025,10,25]]}},"container-title":["2025 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11245651\/11245652\/11247667.pdf?arnumber=11247667","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,27]],"date-time":"2026-01-27T04:47:06Z","timestamp":1769489226000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11247667\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,19]]},"references-count":47,"URL":"https:\/\/doi.org\/10.1109\/iros60139.2025.11247667","relation":{},"subject":[],"published":{"date-parts":[[2025,10,19]]}}}