{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,7]],"date-time":"2026-04-07T16:16:53Z","timestamp":1775578613698,"version":"3.50.1"},"reference-count":97,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"12","license":[{"start":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T00:00:00Z","timestamp":1764547200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T00:00:00Z","timestamp":1764547200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T00:00:00Z","timestamp":1764547200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62236004"],"award-info":[{"award-number":["62236004"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62206078"],"award-info":[{"award-number":["62206078"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62476073"],"award-info":[{"award-number":["62476073"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Circuits Syst. 
Video Technol."],"published-print":{"date-parts":[[2025,12]]},"DOI":"10.1109\/tcsvt.2025.3578266","type":"journal-article","created":{"date-parts":[[2025,6,9]],"date-time":"2025-06-09T13:36:53Z","timestamp":1749476213000},"page":"12278-12291","source":"Crossref","is-referenced-by-count":1,"title":["Manager: Aggregating Insights From Unimodal Experts in Two-Tower VLMs and MLLMs"],"prefix":"10.1109","volume":"35","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-5774-1094","authenticated-orcid":false,"given":"Xiao","family":"Xu","sequence":"first","affiliation":[{"name":"Research Center for Social Computing and Interactive Robotics, Harbin Institute of Technology, Harbin, Heilongjiang, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3619-675X","authenticated-orcid":false,"given":"Libo","family":"Qin","sequence":"additional","affiliation":[{"name":"School of Computer Science and Engineering, Central South University, Changsha, Hunan, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3907-0335","authenticated-orcid":false,"given":"Wanxiang","family":"Che","sequence":"additional","affiliation":[{"name":"Research Center for Social Computing and Interactive Robotics, Harbin Institute of Technology, Harbin, Heilongjiang, China"}]},{"given":"Min-Yen","family":"Kan","sequence":"additional","affiliation":[{"name":"School of Computing, National University of Singapore, Queenstown, Singapore"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.811"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.670"},{"key":"ref3","article-title":"Visual entailment: A novel task for fine-grained image understanding","author":"Xie","year":"2019","journal-title":"arXiv:1901.06706"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1644"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00166"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01763"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i9.26263"},{"key":"ref8","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"139","author":"Radford"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1907.11692"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/p19-1176"},{"key":"ref11","article-title":"LLaVA-OneVision: Easy visual task transfer","author":"Li","year":"2024","journal-title":"arXiv:2408.03326"},{"key":"ref12","article-title":"Llava-next: Improved reasoning, OCR, and world knowledge","author":"Liu","year":"2024"},{"key":"ref13","first-page":"1","article-title":"How much can CLIP benefit vision-and-language tasks?","volume-title":"Proc. ICLR","author":"Shen"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-acl.251"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P16-1162"},{"key":"ref16","article-title":"Language models are unsupervised multitask learners","author":"Radford","year":"2019"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref18","first-page":"13","article-title":"ViLBERT: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks","volume-title":"Proc. 33rd Int. Conf. Neural Inf. Process. 
Syst.","author":"Lu"},{"key":"ref19","first-page":"3015","article-title":"Multi-layer representation fusion for neural machine translation","volume-title":"Proc. COLING","author":"Wang"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.40"},{"key":"ref21","article-title":"Layer normalization","author":"Lei Ba","year":"2016","journal-title":"arXiv:1607.06450"},{"issue":"120","key":"ref22","first-page":"1","article-title":"Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity","volume":"23","author":"Fedus","year":"2022","journal-title":"J. Mach. Learn. Res."},{"key":"ref23","first-page":"1","article-title":"Decoupled weight decay regularization","volume-title":"Proc. ICLR","author":"Loshchilov"},{"key":"ref24","first-page":"5583","article-title":"ViLT: Vision-and-language transformer without convolution or region supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Kim"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58577-8_7"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.202"},{"key":"ref27","first-page":"9694","article-title":"Align before fuse: Vision and language representation learning with momentum distillation","volume-title":"Proc. NeurIPS","author":"Li"},{"key":"ref28","first-page":"32897","article-title":"VLMo: Unified vision-language pre-training with mixture-of-modality-experts","volume-title":"Proc. NeurIPS","author":"Wang"},{"key":"ref29","first-page":"1","article-title":"SimVLM: Simple visual language model pretraining with weak supervision","volume-title":"Proc. ICLR","author":"Wang"},{"key":"ref30","first-page":"12888","article-title":"BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1238"},{"key":"ref32","first-page":"1143","article-title":"Im2Text: Describing images using 1 million captioned photographs","volume-title":"Proc. NeurIPS","volume":"24","author":"Ord\u00f3\u00f1ez"},{"key":"ref33","article-title":"Microsoft COCO captions: Data collection and evaluation server","author":"Chen","year":"2015","journal-title":"arXiv:1504.00325"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0981-7"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02484"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02527"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/WACV48630.2021.00225"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1007\/s11432-024-4235-6"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01100"},{"key":"ref40","article-title":"Qwen2 technical report","volume-title":"arXiv:2407.10671","author":"Yang","year":"2024"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2203.16527"},{"key":"ref42","article-title":"LLaMA-adapter: Efficient fine-tuning of language models with zero-init attention","author":"Zhang","year":"2023","journal-title":"arXiv:2303.16199"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00331"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00686"},{"key":"ref45","first-page":"57730","article-title":"MM-vet: Evaluating large multimodal models for integrated capabilities","volume-title":"Proc. 
ICML","author":"Yu"},{"key":"ref46","article-title":"Seed-bench: Benchmarking multimodal llms with generative comprehension","author":"Li","year":"2023","journal-title":"arXiv:2307.16125"},{"key":"ref47","volume-title":"Grok-1.5 Vision Preview","year":"2019"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00851"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-acl.177"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/WACV51458.2022.00264"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46493-0_15"},{"key":"ref52","first-page":"2507","article-title":"Learn to explain: Multimodal reasoning via thought chains for science question answering","volume-title":"Proc. NeurIPS","author":"Lu"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00913"},{"key":"ref54","first-page":"1","article-title":"Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts","volume-title":"Proc. ICLR","author":"Lu"},{"key":"ref55","article-title":"Llava-next: What else influences visual instruction tuning beyond data?","author":"Li","year":"2024"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.findings-naacl.51"},{"key":"ref57","first-page":"34892","article-title":"Visual instruction tuning","volume-title":"Proc. NeurIPS","author":"Liu"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2010.11929"},{"key":"ref59","article-title":"Mini-monkey: Alleviating the semantic sawtooth effect for lightweight MLLMs via complementary image pyramid","author":"Huang","year":"2024","journal-title":"arXiv:2408.02034"},{"key":"ref60","first-page":"1","article-title":"Neural machine translation by jointly learning to align and translate","volume-title":"Proc. ICLR","author":"Bahdanau"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01391"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1214\/aoms\/1177729694"},{"key":"ref63","first-page":"1","article-title":"VL-BERT: Pre-training of generic visual-linguistic representations","volume-title":"Proc. ICLR","author":"Su"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6795"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58577-8_8"},{"key":"ref66","article-title":"OFA: Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework","author":"Wang","year":"2022","journal-title":"arXiv:2202.03052"},{"key":"ref67","article-title":"Image as a foreign language: BEiT pretraining for all vision and vision-language tasks","author":"Wang","year":"2022","journal-title":"arXiv:2208.10442"},{"key":"ref68","article-title":"CoCa: Contrastive captioners are image-text foundation models","author":"Yu","year":"2022","journal-title":"arXiv:2205.01917"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3452437"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3391304"},{"key":"ref71","first-page":"12116","article-title":"Do vision transformers see like convolutional neural networks","volume-title":"Proc. NeurIPS","author":"Raghu"},{"key":"ref72","first-page":"23296","article-title":"Intriguing properties of vision transformers","volume-title":"Proc. 
NIPS","volume":"34","author":"Naseer"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D18-1179"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N19-1112"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1356"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1810.04805"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.106"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.243"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00255"},{"key":"ref80","first-page":"12077","article-title":"SegFormer: Simple and efficient design for semantic segmentation with transformers","volume-title":"Proc. Adv. Neural Inf. Process. Sys. (NIPS)","volume":"34","author":"Xie"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3266222"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3383238"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3497997"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N18-1202"},{"key":"ref85","first-page":"32942","article-title":"Coarse-to-fine vision-language pre-training with fusion in the backbone","volume-title":"Proc. NeurIPS","author":"Dou"},{"key":"ref86","article-title":"Dense connector for MLLMs","author":"Yao","year":"2024","journal-title":"arXiv:2405.13800"},{"key":"ref87","article-title":"TokenPacker: Efficient visual projector for multimodal LLM","author":"Li","year":"2024","journal-title":"arXiv:2407.02392"},{"key":"ref88","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. NeurIPS","author":"Brown"},{"key":"ref89","article-title":"Llama 2: Open foundation and fine-tuned chat models","author":"Touvron","year":"2023","journal-title":"arXiv:2307.09288"},{"key":"ref90","article-title":"Large language models meet NLP: A survey","author":"Qin","year":"2024","journal-title":"arXiv:2405.12819"},{"key":"ref91","first-page":"19730","article-title":"BLIP-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"Proc. 
ICML","author":"Li"},{"key":"ref92","article-title":"Introducing our multimodal models","author":"Bavishi","year":"2023"},{"key":"ref93","article-title":"Mini-gemini: Mining the potential of multi-modality vision language models","author":"Li","year":"2024","journal-title":"arXiv:2403.18814"},{"key":"ref94","article-title":"Qwen2-VL: Enhancing vision-language Model\u2019s perception of the world at any resolution","author":"Wang","year":"2024","journal-title":"arXiv:2409.12191"},{"key":"ref95","article-title":"Oryx MLLM: On-demand spatial\u2013temporal understanding at arbitrary resolution","author":"Liu","year":"2024","journal-title":"arXiv:2409.12961"},{"key":"ref96","article-title":"Sphinx: The joint mixing of weights, tasks, and visual embeddings for multi-modal large language models","author":"Lin","year":"2023","journal-title":"ArXiv:2311.07575"},{"key":"ref97","article-title":"TextMonkey: An OCR-free large multimodal model for understanding document","author":"Liu","year":"2024","journal-title":"arXiv:2403.04473"}],"container-title":["IEEE Transactions on Circuits and Systems for Video Technology"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/76\/11278843\/11029062.pdf?arnumber=11029062","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,5]],"date-time":"2026-01-05T18:41:11Z","timestamp":1767638471000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11029062\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12]]},"references-count":97,"journal-issue":{"issue":"12"},"URL":"https:\/\/doi.org\/10.1109\/tcsvt.2025.3578266","relation":{},"ISSN":["1051-8215","1558-2205"],"issn-type":[{"value":"1051-8215","type":"print"},{"value":"1558-2205","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,12]]}}}