{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,18]],"date-time":"2025-12-18T18:46:08Z","timestamp":1766083568439,"version":"3.48.0"},"reference-count":68,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U23B2031"],"award-info":[{"award-number":["U23B2031"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62306329"],"award-info":[{"award-number":["62306329"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004735","name":"Natural Science Foundation of Hunan Province","doi-asserted-by":"publisher","award":["2023JJ40676"],"award-info":[{"award-number":["2023JJ40676"]}],"id":[{"id":"10.13039\/501100004735","id-type":"DOI","asserted-by":"publisher"}]},{"name":"China Association for Science and Technology Youth Talent Supporting Program","award":["2024-JCJQ-QT-034"],"award-info":[{"award-number":["2024-JCJQ-QT-034"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. 
Multimedia"],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/tmm.2025.3618553","type":"journal-article","created":{"date-parts":[[2025,10,22]],"date-time":"2025-10-22T17:25:19Z","timestamp":1761153919000},"page":"9776-9787","source":"Crossref","is-referenced-by-count":0,"title":["SwimVG: Step-Wise Multimodal Fusion and Adaption for Visual Grounding"],"prefix":"10.1109","volume":"27","author":[{"ORCID":"https:\/\/orcid.org\/0009-0003-5934-8837","authenticated-orcid":false,"given":"Liangtao","family":"Shi","sequence":"first","affiliation":[{"name":"Key Laboratory of Knowledge Engineering with Big Data, Hefei University of Technology, Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2597-3020","authenticated-orcid":false,"given":"Ting","family":"Liu","sequence":"additional","affiliation":[{"name":"School of systems engineering, National University of Defense Technology, Changsha, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-1541-1717","authenticated-orcid":false,"given":"Xiantao","family":"Hu","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8115-7020","authenticated-orcid":false,"given":"Yue","family":"Hu","sequence":"additional","affiliation":[{"name":"School of systems engineering, National University of Defense Technology, Changsha, China"}]},{"given":"Quanjun","family":"Yin","sequence":"additional","affiliation":[{"name":"School of systems engineering, National University of Defense Technology, Changsha, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5461-3986","authenticated-orcid":false,"given":"Richang","family":"Hong","sequence":"additional","affiliation":[{"name":"Key Laboratory of Knowledge Engineering with Big Data, Hefei University of Technology, Hefei, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00180"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01045"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2020.3042066"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/tpami.2025.3630635"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00008"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.279"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TIM.2022.3224525"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TIM.2020.3027926"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00179"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3296823"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3328185"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01506"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3681071"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICME57554.2024.10688132"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01832"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10446504"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i5.28286"},{"key":"ref18","first-page":"16664","article-title":"AdaptFormer: Adapting vision transformers for scalable visual recognition","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","author":"Chen","year":"2022"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3321501"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46475-6_5"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.9"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46493-0_48"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/iccv.2015.303"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00142"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00478"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3374786"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2024.3423663"},{"article-title":"Show me what and where has changed? Question answering and grounding for remote sensing change detection","year":"2024","author":"Li","key":"ref28"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/LGRS.2024.3386311"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00477"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2019.2911066"},{"key":"ref32","article-title":"Referring expression object segmentation with caption-aware consistency","volume-title":"Proc. Brit. Mach. Vis. Conf.","author":"Chen","year":"2019"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICME52920.2022.9859880"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00928"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19833-5_35"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i2.25331"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/tim.2023.3324362"},{"key":"ref38","article-title":"QWEN-Vl: A versatile vision-language model for understanding, localization, text reading, and beyond","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Bai","year":"2023"},{"key":"ref39","first-page":"61501","article-title":"VisionLLM: Large language model is also an open-ended decoder for vision-centric tasks","volume-title":"Proc. 37th Int. Conf. Neural Inf. Process. Syst.","author":"Wang","year":"2024"},{"key":"ref40","first-page":"121475","article-title":"CogVLM: Visual expert for pretrained language models","volume-title":"Proc. Adv. Neural Inform. Process. Syst.","volume":"37","author":"Wang","year":"2024"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"ref42","article-title":"LoRA: Low-rank adaptation of large language models","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Hu","year":"2022"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2023.3308969"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01393"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.567"},{"article-title":"Sparse-Tuning: Adapting vision transformers with efficient fine-tuning and inference","year":"2024","author":"Liu","key":"ref46"},{"article-title":"GPT-4 technical report","year":"2023","author":"Achiam","key":"ref47"},{"article-title":"Llama 2: Open foundation and fine-tuned chat models","year":"2023","author":"Touvron","key":"ref48"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.emnlp-main.287"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/tcsvt.2025.3551766"},{"key":"ref51","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford","year":"2021"},{"key":"ref52","article-title":"Dinov2: Learning robust visual features without supervision","author":"Oquab","year":"2023","journal-title":"Trans. Mach. Learn. Res."},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2010.11929"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"article-title":"YOLOv3: An incremental improvement","year":"2018","author":"Redmon","key":"ref55"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58568-6_23"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2022.3147385"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2022.3183827"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-25085-9_1"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3343736"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2023.3334099"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3339628"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01277"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00075"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46475-6_5"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00166"},{"key":"ref67","article-title":"UniAdapter: Unified parameter-efficient transfer learning for cross-modal modeling","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Lu","year":"2024"},{"article-title":"Multi-stage vision token dropping: Towards efficient multimodal large language model","year":"2024","author":"Liu","key":"ref68"}],"container-title":["IEEE Transactions on Multimedia"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6046\/10844992\/11214462.pdf?arnumber=11214462","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,18]],"date-time":"2025-12-18T18:34:22Z","timestamp":1766082862000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11214462\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":68,"URL":"https:\/\/doi.org\/10.1109\/tmm.2025.3618553","relation":{},"ISSN":["1520-9210","1941-0077"],"issn-type":[{"type":"print","value":"1520-9210"},{"type":"electronic","value":"1941-0077"}],"subject":[],"published":{"date-parts":[[2025]]}}}