{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,20]],"date-time":"2026-03-20T21:53:29Z","timestamp":1774043609508,"version":"3.50.1"},"reference-count":49,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2023YFB4705004"],"award-info":[{"award-number":["2023YFB4705004"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["52205523"],"award-info":[{"award-number":["52205523"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["52188102"],"award-info":[{"award-number":["52188102"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Advanced Engineering Informatics"],"published-print":{"date-parts":[[2026,5]]},"DOI":"10.1016\/j.aei.2026.104508","type":"journal-article","created":{"date-parts":[[2026,2,27]],"date-time":"2026-02-27T09:56:52Z","timestamp":1772186212000},"page":"104508","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["VLM-PoseManip: Dexterous robotic manipulation via Vision-Language model based instructive pose estimation for Human-Robot 
collaboration"],"prefix":"10.1016","volume":"72","author":[{"given":"Enguang","family":"Wang","sequence":"first","affiliation":[]},{"given":"Wencan","family":"Pei","sequence":"additional","affiliation":[]},{"given":"Yiping","family":"Gao","sequence":"additional","affiliation":[]},{"given":"Chenyi","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Xinyu","family":"Li","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1485-0722","authenticated-orcid":false,"given":"Liang","family":"Gao","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.aei.2026.104508_b0005","article-title":"Human-Robot Collaboration in Mixed-Flow Assembly Line Balancing under uncertainty: an Efficient Discrete Bees Algorithm","volume":"41","author":"Zhang","year":"2024","journal-title":"J. Ind. Inf. Integr."},{"key":"10.1016\/j.aei.2026.104508_b0010","doi-asserted-by":"crossref","DOI":"10.1016\/j.aei.2025.103613","article-title":"Integration of dynamic knowledge and LLM for adaptive human-robot collaborative assembly solution generation","volume":"68","author":"Hua","year":"2025","journal-title":"Adv. Eng. Inf."},{"key":"10.1016\/j.aei.2026.104508_b0015","doi-asserted-by":"crossref","DOI":"10.1016\/j.aei.2026.104370","article-title":"Physics-informed embodied intelligence in the foundation model era: advancing robot manipulation for smart manufacturing","volume":"71","author":"Li","year":"2026","journal-title":"Adv. Eng. Inf."},{"key":"10.1016\/j.aei.2026.104508_b0020","doi-asserted-by":"crossref","first-page":"199","DOI":"10.1016\/j.jmsy.2021.11.001","article-title":"A futuristic perspective on human-centric assembly","volume":"62","author":"Wang","year":"2022","journal-title":"J. Manuf. Syst."},{"key":"10.1016\/j.aei.2026.104508_b0025","first-page":"2165","article-title":"Rt-2: Vision-language-action models transfer web knowledge to robotic control","author":"Zitkovich","year":"2023","journal-title":"Conference on Robot Learning, PMLR"},{"key":"10.1016\/j.aei.2026.104508_b0030","first-page":"3988","article-title":"TinyVLA: Toward Fast","volume":"10","author":"Wen","year":"2025","journal-title":"Data-Efficient Vision-Language-Action Models for Robotic Manipulation, IEEE Robotics and Automation Letters."},{"key":"10.1016\/j.aei.2026.104508_b0035","first-page":"1702","article-title":"Cot-VLA: Visual chain-of-thought reasoning for vision-language-action models","author":"Zhao","year":"2025","journal-title":"Computer Vision and Pattern Recognition Conference (CVPR)"},{"key":"10.1016\/j.aei.2026.104508_b0040","series-title":"IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","first-page":"7580","article-title":"Injecting robotic affordance and physically grounded information into multi-modal large language models","author":"Huang","year":"2024"},{"key":"10.1016\/j.aei.2026.104508_b0045","unstructured":"Y. Wei, M. Lin, Y. Lin, J. Jiang, X. Wu, L. Zeng, W. Zheng, AffordDexGrasp: Open-set Language-guided Dexterous Grasp with Generalizable-Instructive Affordance.arXiv preprint arXiv:2503.07360. https:\/\/arxiv.org\/abs\/2503.07360."},{"key":"10.1016\/j.aei.2026.104508_b0050","doi-asserted-by":"crossref","unstructured":"Black, K., Brown, N., Driess, D., Esmail, A., Equi, M., Finn, C., ... & Zhilinsky, U.\u03c00: A Vision-Language-Action Flow Model for General Robot Control. (2024). 
arXiv preprint arXiv:2410.24164.","DOI":"10.15607\/RSS.2025.XXI.010"},{"key":"10.1016\/j.aei.2026.104508_b0055","doi-asserted-by":"crossref","unstructured":"Liu, H., Guo, S., Mai, P., Cao, J., Li, H., & Ma, J. RobodexVLM: Visual language model-enabled task planning and motion control for dexterous robot manipulation. (2025). arXiv preprint arXiv:2503.01616.","DOI":"10.1109\/IROS60139.2025.11247714"},{"key":"10.1016\/j.aei.2026.104508_b0060","unstructured":"Huang, W., Wang, C., Li, Y., Zhang, R., & Fei-Fei, L. Rekep: Spatio-temporal reasoning of relational keypoint constraints for robotic manipulation. (2024). arXiv preprint arXiv:2409.01652."},{"key":"10.1016\/j.aei.2026.104508_b0065","series-title":"In Proceedings of the Computer Vision and Pattern Recognition Conference","first-page":"17359","article-title":"Towards general robotic manipulation via object-centric interaction primitives as spatial constraints","author":"Pan","year":"2025"},{"key":"10.1016\/j.aei.2026.104508_b0070","article-title":"Empowering natural human\u2013robot collaboration through multimodal language models and spatial intelligence: Pathways and perspectives","volume":"97","author":"Wu","year":"2025","journal-title":"Rob. Comput. Integr. Manuf."},{"key":"10.1016\/j.aei.2026.104508_b0075","unstructured":"H. Zhou, X. Yao, O. Mees, Y. Meng, T. Xiao, Y. Bisk, J. Oh, E. Johns, M. Shridhar, D. Shah, J. Thomason, K. Huang, J. Chai, Z. Bing, A. Knoll, Bridging language and action: A survey of language-conditioned robot manipulation, arXiv preprint arXiv:2312.10807 (2023). https:\/\/arxiv.org\/abs\/2312.10807."},{"key":"10.1016\/j.aei.2026.104508_b0080","article-title":"Enhancing semantic search using ontologies: a hybrid information retrieval approach for industrial text","volume":"45","author":"Naqvi","year":"2025","journal-title":"Rob. Comput. Integr. Manuf."},{"key":"10.1016\/j.aei.2026.104508_b0085","article-title":"Overview: Application status and prospects of digital twin technology in mechanical cutting processing","volume":"45","author":"Li","year":"2025","journal-title":"J. Ind. Inf. Integr."},{"key":"10.1016\/j.aei.2026.104508_b0090","first-page":"38","article-title":"Grounding dino: Marrying dino with grounded pre-training for open-set object detection","author":"Liu","year":"2024","journal-title":"European Conference on Computer Vision."},{"key":"10.1016\/j.aei.2026.104508_b0095","first-page":"894","article-title":"Cliport: what and where pathways for robotic manipulation","volume":"164","author":"Shridhar","year":"2022","journal-title":"Conference on Robot Learning."},{"key":"10.1016\/j.aei.2026.104508_b0100","series-title":"IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","first-page":"9676","article-title":"Polaris Open-ended interactive robotic manipulation via syn2real visual grounding and large language models","author":"Wang","year":"2024"},{"key":"10.1016\/j.aei.2026.104508_b0105","doi-asserted-by":"crossref","first-page":"10567","DOI":"10.1109\/LRA.2024.3477090","article-title":"Gpt-4v (ision) for robotics: Multimodal task planning from human demonstration","volume":"9","author":"Wake","year":"2024","journal-title":"IEEE Rob. Autom. Lett."},{"key":"10.1016\/j.aei.2026.104508_b0110","unstructured":"R. Mirjalili, M. Krawez, S. Silenzi, Y. Blei, W. Burgard, Lan-grasp: Using large language models for semantic object grasping.arXiv preprint arXiv:2310.05239. 
https:\/\/arxiv.org\/abs\/2310.05239."},{"key":"10.1016\/j.aei.2026.104508_b0115","series-title":"IEEE International Conference on Robotics and Automation (ICRA)","first-page":"12462","article-title":"Physically grounded vision-language models for robotic manipulation","author":"Gao","year":"2024"},{"key":"10.1016\/j.aei.2026.104508_b0120","doi-asserted-by":"crossref","DOI":"10.1016\/j.rcim.2025.102978","article-title":"VLM-MSGraph: Vision Language Model-enabled Multi-hierarchical Scene Graph for robotic assembly","volume":"94","author":"Li","year":"2025","journal-title":"Rob. Comput. Integr. Manuf."},{"key":"10.1016\/j.aei.2026.104508_b0125","doi-asserted-by":"crossref","DOI":"10.1016\/j.rcim.2025.103030","article-title":"Large vision-language models enabled novel objects 6D pose estimation for human-robot collaboration","volume":"95","author":"Xia","year":"2025","journal-title":"Rob. Comput. Integr. Manuf."},{"key":"10.1016\/j.aei.2026.104508_b0130","unstructured":"G. Ponimatkin, M. C\u00edfka, T. Sou\u010dek, M. Fourmy, Y. Labb\u00e9, V. Petrik, J. Sivic, 6D Object Pose Tracking in Internet Videos for Robotic Manipulation. arXiv preprint arXiv:2503.10307. https:\/\/arxiv.org\/abs\/2503.10307."},{"key":"10.1016\/j.aei.2026.104508_b0135","series-title":"IEEE International Conference on Robotics and Automation (ICRA)","first-page":"3665","article-title":"Self-supervised 6d object pose estimation for robot manipulation","author":"Deng","year":"2020"},{"key":"10.1016\/j.aei.2026.104508_b0140","doi-asserted-by":"crossref","first-page":"1515","DOI":"10.1109\/LRA.2023.3240362","article-title":"I2c-net: using instance-level neural networks for monocular categorylevel 6d pose estimation","volume":"8","author":"Remus","year":"2023","journal-title":"IEEE Rob. Autom. Lett."},{"key":"10.1016\/j.aei.2026.104508_b0145","first-page":"17868","article-title":"Foundationpose Unified 6d pose estimation and tracking of novel objects","author":"Wen","year":"2024","journal-title":"IEEE\/CVF Conference on Computer Vision and Pattern Recognition(CVPR)"},{"key":"10.1016\/j.aei.2026.104508_b0150","doi-asserted-by":"crossref","first-page":"4990","DOI":"10.1109\/TIE.2024.3451150","article-title":"SGSIN: Simultaneous Grasp and Suction Inference Network via Attention-based Affordance Learning","volume":"72","author":"Wang","year":"2025","journal-title":"IEEE Trans. Ind. Electron."},{"key":"10.1016\/j.aei.2026.104508_b0155","doi-asserted-by":"crossref","first-page":"3226","DOI":"10.1109\/LRA.2022.3144794","article-title":"Probabilistic spatio-temporal fusion of affordances for grasping and manipulation","volume":"7","author":"Pohl","year":"2022","journal-title":"IEEE Rob. Autom. Lett."},{"key":"10.1016\/j.aei.2026.104508_b0160","series-title":"IEEE International Conference on Robotics and Automation (ICRA)","first-page":"2562","article-title":"Learning visual affordances with target-orientated deep q-network to grasp objects by harnessing environmental fixtures","author":"Liang","year":"2021"},{"key":"10.1016\/j.aei.2026.104508_b0165","doi-asserted-by":"crossref","unstructured":"L. Chen, Z. L, Z. Lu, Y. Wang, H. Nie, C. Yang, Domain-Invariant Feature Learning via Margin and Structure Priors for Robotic Grasping,IEEE Robotics and Automation Letters. 10(2025)1313-1320. 
10.1109\/LRA.2024.3520437.","DOI":"10.1109\/LRA.2024.3520437"},{"key":"10.1016\/j.aei.2026.104508_b0170","article-title":"SS-ARGNet: a Novel Cascaded Schema for Robots\u20197-DoF Grasping in Adjacent and Stacked Object Scenarios","volume":"74","author":"Zhong","year":"2025","journal-title":"IEEE Trans. Instrum. Meas."},{"key":"10.1016\/j.aei.2026.104508_b0175","doi-asserted-by":"crossref","DOI":"10.1109\/TIM.2024.3522557","article-title":"HBGNet: Robotic Grasp Detection using a Hybrid Network","volume":"74","author":"Zuo","year":"2025","journal-title":"IEEE Trans. Instrum. Meas."},{"key":"10.1016\/j.aei.2026.104508_b0180","article-title":"MCS-ResNet: a Generative Robot Grasping Network based on RGB-D Fusion","volume":"74","author":"Pei","year":"2024","journal-title":"IEEE Trans. Instrum. Meas."},{"key":"10.1016\/j.aei.2026.104508_b0185","doi-asserted-by":"crossref","first-page":"971","DOI":"10.1109\/LRA.2024.3511410","article-title":"FFBGNet: Full-Flow Bidirectional Feature Fusion Grasp Detection Network based on Hybrid Architecture","volume":"10","author":"Wan","year":"2025","journal-title":"IEEE Rob. Autom. Lett."},{"key":"10.1016\/j.aei.2026.104508_b0190","doi-asserted-by":"crossref","first-page":"4723","DOI":"10.1109\/TMECH.2024.3385104","article-title":"Interest Point selection and Feature Extraction in Six-DoF Grasp Pose Detection","volume":"29","author":"He","year":"2024","journal-title":"IEEE\/ASME Trans. Mechatron."},{"key":"10.1016\/j.aei.2026.104508_b0195","doi-asserted-by":"crossref","first-page":"3304","DOI":"10.1109\/ICRA.2011.5980145","article-title":"Efficient grasping from rgbd images: Learning using a new rectangle representation","author":"Jiang","year":"2011","journal-title":"IEEE International Conference on Robotics and Automation."},{"key":"10.1016\/j.aei.2026.104508_b0200","doi-asserted-by":"crossref","unstructured":"A. Depierre, E. Dellandr\u00e9a, L. Chen, Jacquard: A large scale dataset for robotic grasp detection, IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS).(2018)3511-3516. 10.1109\/IROS.2018.8593950.","DOI":"10.1109\/IROS.2018.8593950"},{"key":"10.1016\/j.aei.2026.104508_b0205","series-title":"IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"11444","article-title":"Graspnet-1billion: a large-scale benchmark for general object grasping","author":"Fang","year":"2020"},{"key":"10.1016\/j.aei.2026.104508_b0210","author":"OpenAI","year":"2024","journal-title":"Accessed"},{"key":"10.1016\/j.aei.2026.104508_b0215","unstructured":"E. Hu, Y. Shen, P. Wallis, Z. Allen-Zhu, Y. Li, S. Wang, W. Chen, Lora: Low-rank adaptation of large language models,International Conference on Learning Representations (ICLR).1(2022)3. https:\/\/arxiv.org\/abs\/2106.09685."},{"key":"10.1016\/j.aei.2026.104508_b0220","first-page":"54627","article-title":"Genpose: Generative category-level object pose estimation via diffusion models","volume":"36","author":"Zhang","year":"2023","journal-title":"Conference on Neural Information Processing System (NeurIPS)."},{"key":"10.1016\/j.aei.2026.104508_b0225","doi-asserted-by":"crossref","first-page":"303","DOI":"10.1016\/0098-3004(93)90090-R","article-title":"Principal components analysis","volume":"19","author":"Ma\u0107kiewicz","year":"1993","journal-title":"Comput. 
Geosci."},{"key":"10.1016\/j.aei.2026.104508_b0230","series-title":"IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","first-page":"11428","article-title":"A dataset of real-world manipulable object categories with pose annotations, affordances, and reconstructions","author":"Guo","year":"2023"},{"key":"10.1016\/j.aei.2026.104508_b0235","unstructured":"T. Ren, Y. Chen, Q. Jiang, Z. Zeng, Y. Xiong, W. Liu, ... & L. Zhang, Dino-x: A unified vision model for open-world object detection and understanding.arXiv preprint arXiv:2411.14347. https:\/\/arxiv.org\/abs\/2411.14347."},{"key":"10.1016\/j.aei.2026.104508_b0240","unstructured":"T. Ren, S. Liu, A. Zeng, J. Lin, K. Li, H. Cao, ... & L. Zhang, Grounded sam: Assembling open-world models for diverse visual tasks.arXiv preprint arXiv:2401.14159. https:\/\/arxiv.org\/abs\/2401.14159."},{"key":"10.1016\/j.aei.2026.104508_b0245","article-title":"A digital twin system for task-replanning and human-robot control of robot manipulation","volume":"62","author":"Li","year":"2024","journal-title":"Adv. Eng. Inf."}],"container-title":["Advanced Engineering Informatics"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1474034626002004?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1474034626002004?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,3,20]],"date-time":"2026-03-20T20:02:25Z","timestamp":1774036945000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S1474034626002004"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,5]]},"references-count":49,"alternative-id":["S1474034626002004"],"URL":"https:\/\/doi.org\/10.1016\/j.aei.2026.104508","relation":{},"ISSN":["1474-0346"],"issn-type":[{"value":"1474-0346","type":"print"}],"subject":[],"published":{"date-parts":[[2026,5]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"VLM-PoseManip: Dexterous robotic manipulation via Vision-Language model based instructive pose estimation for Human-Robot collaboration","name":"articletitle","label":"Article Title"},{"value":"Advanced Engineering Informatics","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.aei.2026.104508","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 Elsevier Ltd. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"104508"}}