{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,17]],"date-time":"2026-03-17T17:58:55Z","timestamp":1773770335051,"version":"3.50.1"},"reference-count":73,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"6","license":[{"start":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T00:00:00Z","timestamp":1764547200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T00:00:00Z","timestamp":1764547200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T00:00:00Z","timestamp":1764547200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62203150"],"award-info":[{"award-number":["62203150"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62276090"],"award-info":[{"award-number":["62276090"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62406102"],"award-info":[{"award-number":["62406102"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of 
China","doi-asserted-by":"publisher","award":["2018AAA0100803"],"award-info":[{"award-number":["2018AAA0100803"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Changzhou Sci&Tech Program","award":["CJ20220051"],"award-info":[{"award-number":["CJ20220051"]}]},{"name":"Jiangsu Province Advanced Leading Technology and Basic Research Project","award":["CZ521001612"],"award-info":[{"award-number":["CZ521001612"]}]},{"DOI":"10.13039\/501100013058","name":"Jiangsu Provincial Key Research and Development Program","doi-asserted-by":"publisher","award":["BE2022160"],"award-info":[{"award-number":["BE2022160"]}],"id":[{"id":"10.13039\/501100013058","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100013058","name":"Jiangsu Provincial Key Research and Development Program","doi-asserted-by":"publisher","award":["BZ2024061"],"award-info":[{"award-number":["BZ2024061"]}],"id":[{"id":"10.13039\/501100013058","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Cogn. Dev. 
Syst."],"published-print":{"date-parts":[[2025,12]]},"DOI":"10.1109\/tcds.2025.3566649","type":"journal-article","created":{"date-parts":[[2025,5,5]],"date-time":"2025-05-05T13:59:01Z","timestamp":1746453541000},"page":"1461-1473","source":"Crossref","is-referenced-by-count":2,"title":["Exploring Grounding Abilities in Vision-Language Models Through Contextual Perception"],"prefix":"10.1109","volume":"17","author":[{"ORCID":"https:\/\/orcid.org\/0009-0001-8568-5680","authenticated-orcid":false,"given":"Wei","family":"Xu","sequence":"first","affiliation":[{"name":"College of Artificial Intelligence and Automation, Hohai University, Changzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5475-1473","authenticated-orcid":false,"given":"Tianfei","family":"Zhou","sequence":"additional","affiliation":[{"name":"Department of Computer Science, Beijing Institute of Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-3054-3905","authenticated-orcid":false,"given":"Taoyuan","family":"Zhang","sequence":"additional","affiliation":[{"name":"De Anza College, Cupertino, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2977-8559","authenticated-orcid":false,"given":"Jie","family":"Li","sequence":"additional","affiliation":[{"name":"Engineering Research Center of Intelligent Theranostics Technology and Instruments, Ministry of Education, Nanjing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8636-526X","authenticated-orcid":false,"given":"Peiyin","family":"Chen","sequence":"additional","affiliation":[{"name":"College of Artificial Intelligence and Automation, Hohai University, Changzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9003-2054","authenticated-orcid":false,"given":"Jia","family":"Pan","sequence":"additional","affiliation":[{"name":"Department of Computer Science, University of Hong Kong, Hong Kong, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1310-6739","authenticated-orcid":false,"given":"Xiaofeng","family":"Liu","sequence":"additional","affiliation":[{"name":"College of Artificial Intelligence and Automation, Hohai University, Changzhou, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/s12559-024-10281-5"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/MIS.2024.3374582"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/tkde.2025.3536008"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/3571730"},{"key":"ref5","article-title":"Hallucination is inevitable: an innate limitation of large language models","author":"Xu","year":"2025"},{"key":"ref6","article-title":"GPT-4V(ision) system card","year":"2023","journal-title":"Tech. Rep."},{"key":"ref7","article-title":"Gemini: a family of highly capable multimodal models","author":"Team","year":"2023"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/j.tics.2007.09.009"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/j.cviu.2010.02.004"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01237-3_5"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01300"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00730"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-024-02214-4"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2012.67"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00955"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3049156"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00286"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TCDS.2021.3079278"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00064"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v3
7i2.25340"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TCDS.2022.3225200"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2024.102601"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TCDS.2023.3266103"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TCDS.2022.3182877"},{"key":"ref25","article-title":"Do as I can, not as I say: Grounding language in robotic affordances","volume-title":"Proc. Conf. Robot Learn.","author":"Ahn","year":"2022"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA57147.2024.10611163"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/icra48891.2023.10161317"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/access.2024.3387941"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10160591"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2024.3360020"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00682"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01417"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1177\/02783649241281508"},{"key":"ref34","article-title":"Visual instruction tuning","author":"Liu","year":"2023"},{"key":"ref35","article-title":"InstructBLIP: towards general-purpose vision-language models with instruction tuning","author":"Dai","year":"2023"},{"key":"ref36","article-title":"MiniGPT-4: enhancing vision-language understanding with advanced large language models","author":"Zhu","year":"2023"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/tpami.2025.3571946"},{"key":"ref38","article-title":"mPLUG-Owl: modularization empowers large language models with multimodality","author":"Ye","year":"2023"},{"key":"ref39","article-title":"LLaMA-Adapter: efficient fine-tuning of language models with zero-init attention","author":"Zhang","year":"2023"},{"key":"ref40","article-title":"InternGPT: solving 
vision-centric tasks by interacting with ChatGPT beyond language","author":"Liu","year":"2023"},{"key":"ref41","article-title":"Reasoning grasping via multimodal large language model","author":"Jin","year":"2024"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73232-4_2"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/IROS58592.2024.10801993"},{"key":"ref44","article-title":"Look before you leap: unveiling the power of GPT-4V in robotic vision-language planning","author":"Hu","year":"2023"},{"key":"ref45","first-page":"24824","article-title":"Chain-of-thought prompting elicits reasoning in large language models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Wei","year":"2022"},{"key":"ref46","article-title":"Least-to-most prompting enables complex reasoning in large language models","author":"Zhou","year":"2023"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.494"},{"key":"ref48","article-title":"Large language models as optimizers","author":"Yang","year":"2023"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"ref50","first-page":"19769","article-title":"Segment everything everywhere all at once","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Zou","year":"2023"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73195-2_27"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.01222"},{"key":"ref53","article-title":"T-Rex: counting by visual prompting","author":"Jiang","year":"2023"},{"key":"ref54","article-title":"The dawn of LMMs: preliminary explorations with GPT-4V(Ision)","author":"Yang","year":"2023"},{"key":"ref55","article-title":"A challenger to GPT-4V? 
early explorations of Gemini in visual expertise","author":"Fu","year":"2023"},{"key":"ref56","article-title":"Set-of-mark prompting unleashes extraordinary visual grounding in GPT-4v","author":"Yang","year":"2023"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00226"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01258-8_23"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73337-6_9"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1098\/rstb.2010.0188"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206537"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1086"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72970-6_3"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52729.2023.01451"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01789"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01101"},{"key":"ref67","article-title":"Fine-grained visual prompting","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"36","author":"Yang","year":"2023"},{"key":"ref68","article-title":"Shikra: Unleashing multimodal LLM\u2019s referential dialogue magic","author":"Chen","year":"2023"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.02484"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00915"},{"key":"ref71","article-title":"MiniGPT-v2: Large language model as a unified interface for vision-language multi-task learning","author":"Chen","year":"2023"},{"key":"ref72","article-title":"Ferret: refer and ground anything anywhere at any granularity","author":"You","year":"2023"},{"key":"ref73","article-title":"CogVLM: Visual expert for pretrained language models","author":"Wang","year":"2024"}],"container-title":["IEEE Transactions on Cognitive and Developmental Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/7274989\/11288412\/10985830.pdf?arnumber=10985830","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,16]],"date-time":"2025-12-16T18:33:11Z","timestamp":1765909991000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10985830\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12]]},"references-count":73,"journal-issue":{"issue":"6"},"URL":"https:\/\/doi.org\/10.1109\/tcds.2025.3566649","relation":{},"ISSN":["2379-8920","2379-8939"],"issn-type":[{"value":"2379-8920","type":"print"},{"value":"2379-8939","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,12]]}}}