{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,19]],"date-time":"2026-02-19T02:20:10Z","timestamp":1771467610512,"version":"3.50.1"},"publisher-location":"Cham","reference-count":105,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031726392","type":"print"},{"value":"9783031726408","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T00:00:00Z","timestamp":1730160000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T00:00:00Z","timestamp":1730160000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72640-8_7","type":"book-chapter","created":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T09:34:20Z","timestamp":1730108060000},"page":"113-132","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":12,"title":["BRAVE: Broadening the\u00a0Visual Encoding of\u00a0Vision-Language Models"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5323-579X","authenticated-orcid":false,"given":"O\u011fuzhan Fatih","family":"Kar","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3358-9686","authenticated-orcid":false,"given":"Alessio","family":"Tonioni","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6920-5109","authenticated-orcid":false,"given":"Petra","family":"Poklukar","sequence":"additional","affiliation":[]},{"given":"Achin","family":"Kulshrestha","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5559-1843","authenticated-orcid":false,"given":"Amir","family":"Zamir","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5598-5212","authenticated-orcid":false,"given":"Federico","family":"Tombari","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,10,29]]},"reference":[{"key":"7_CR1","unstructured":"Achiam, J., et\u00a0al.: GPT-4 technical report. arXiv preprint arXiv:2303.08774 (2023)"},{"key":"7_CR2","doi-asserted-by":"crossref","unstructured":"Agrawal, H., et al.: NoCaps: novel object captioning at scale. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 8948\u20138957 (2019)","DOI":"10.1109\/ICCV.2019.00904"},{"key":"7_CR3","first-page":"23716","volume":"35","author":"JB Alayrac","year":"2022","unstructured":"Alayrac, J.B., et al.: Flamingo: a visual language model for few-shot learning. Adv. Neural. Inf. Process. Syst. 35, 23716\u201323736 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"7_CR4","unstructured":"Awadalla, A., et\u00a0al.: OpenFlamingo: an open-source framework for training large autoregressive vision-language models. arXiv preprint arXiv:2308.01390 (2023)"},{"key":"7_CR5","unstructured":"Bachmann, R., et al.: 4M-21: an any-to-any vision model for tens of tasks and modalities. arXiv preprint arXiv:2406.09406 (2024)"},{"key":"7_CR6","unstructured":"Bai, J., et\u00a0al.: Qwen technical report. 
arXiv preprint arXiv:2309.16609 (2023)"},{"key":"7_CR7","doi-asserted-by":"crossref","unstructured":"Bai, Y., et al.: Sequential modeling enables scalable learning for large vision models. arXiv preprint arXiv:2312.00785 (2023)","DOI":"10.1109\/CVPR52733.2024.02157"},{"key":"7_CR8","doi-asserted-by":"crossref","unstructured":"Bang, Y., et\u00a0al.: A multitask, multilingual, multimodal evaluation of chatGPT on reasoning, hallucination, and interactivity. arXiv preprint arXiv:2302.04023 (2023)","DOI":"10.18653\/v1\/2023.ijcnlp-main.45"},{"key":"7_CR9","unstructured":"Brock, A., De, S., Smith, S.L., Simonyan, K.: High-performance large-scale image recognition without normalization. In: International Conference on Machine Learning, pp. 1059\u20131071. PMLR (2021)"},{"key":"7_CR10","unstructured":"Byeon, M., Park, B., Kim, H., Lee, S., Baek, W., Kim, S.: COYO-700M: image-text pair dataset (2022). https:\/\/github.com\/kakaobrain\/coyo-dataset"},{"key":"7_CR11","doi-asserted-by":"crossref","unstructured":"Caron, M., et al.: Emerging properties in self-supervised vision transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 9650\u20139660 (2021)","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"7_CR12","doi-asserted-by":"crossref","unstructured":"Changpinyo, S., Kukliansky, D., Szpektor, I., Chen, X., Ding, N., Soricut, R.: All you may need for VQA are image captions. arXiv preprint arXiv:2205.01883 (2022)","DOI":"10.18653\/v1\/2022.naacl-main.142"},{"key":"7_CR13","doi-asserted-by":"crossref","unstructured":"Changpinyo, S., Sharma, P.K., Ding, N., Soricut, R.: Conceptual 12M: pushing web-scale image-text pre-training to recognize long-tail visual concepts. In: 2021 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3557\u20133567 (2021)","DOI":"10.1109\/CVPR46437.2021.00356"},{"key":"7_CR14","unstructured":"Chen, J., et al.: MiniGPT-v2: large language model as a unified interface for vision-language multi-task learning. arXiv preprint arXiv:2310.09478 (2023)"},{"key":"7_CR15","unstructured":"Chen, K., Zhang, Z., Zeng, W., Zhang, R., Zhu, F., Zhao, R.: Shikra: unleashing multimodal LLM\u2019s referential dialogue magic. arXiv preprint arXiv:2306.15195 (2023)"},{"key":"7_CR16","doi-asserted-by":"crossref","unstructured":"Chen, L., et al.: ShareGPT4V: improving large multi-modal models with better captions. arXiv preprint arXiv:2311.12793 (2023)","DOI":"10.1007\/978-3-031-72643-9_22"},{"key":"7_CR17","unstructured":"Chen, T., Saxena, S., Li, L., Fleet, D.J., Hinton, G.: Pix2Seq: a language modeling framework for object detection. In: International Conference on Learning Representations (2022)"},{"key":"7_CR18","unstructured":"Chen, X., et\u00a0al.: PALI-x: on scaling up a multilingual vision and language model. arXiv preprint arXiv:2305.18565 (2023)"},{"key":"7_CR19","unstructured":"Chen, X., et\u00a0al.: PALI: a jointly-scaled multilingual language-image model. arXiv preprint arXiv:2209.06794 (2022)"},{"key":"7_CR20","unstructured":"Chen, X., et al.: Microsoft CoCo captions: data collection and evaluation server. arXiv preprint arXiv:1504.00325 (2015)"},{"key":"7_CR21","doi-asserted-by":"crossref","unstructured":"Cherti, M., et al.: Reproducible scaling laws for contrastive language-image learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
2818\u20132829 (2023)","DOI":"10.1109\/CVPR52729.2023.00276"},{"key":"7_CR22","unstructured":"Chiang, W.L., et\u00a0al.: Vicuna: an open-source chatbot impressing GPT-4 with 90%* ChatGPT quality (2023). https:\/\/vicunalmsys.org"},{"key":"7_CR23","unstructured":"Cho, J., Lei, J., Tan, H., Bansal, M.: Unifying vision-and-language tasks via text generation. In: International Conference on Machine Learning, pp. 1931\u20131942. PMLR (2021)"},{"key":"7_CR24","unstructured":"Chung, H.W., et\u00a0al.: Scaling instruction-finetuned language models. arXiv preprint arXiv:2210.11416 (2022)"},{"key":"7_CR25","unstructured":"Dai, W., et al.: InstructBLIP: towards general-purpose vision-language models with instruction tuning. In: Thirty-Seventh Conference on Neural Information Processing Systems (2023). https:\/\/openreview.net\/forum?id=vvoWPYqZJA"},{"key":"7_CR26","doi-asserted-by":"crossref","unstructured":"Dehghani, M., Gritsenko, A., Arnab, A., Minderer, M., Tay, Y.: SCENIC: a JAX library for computer vision research and beyond. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 21393\u201321398 (2022)","DOI":"10.1109\/CVPR52688.2022.02070"},{"key":"7_CR27","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: transformers for image recognition at scale. In: International Conference on Learning Representations (2021)"},{"key":"7_CR28","unstructured":"Driess, D., et\u00a0al.: PALM-e: an embodied multimodal language model. arXiv preprint arXiv:2303.03378 (2023)"},{"key":"7_CR29","doi-asserted-by":"crossref","unstructured":"Eftekhar, A., Sax, A., Bachmann, R., Malik, J., Zamir, A.R.: Omnidata: a scalable pipeline for making multi-task mid-level vision datasets from 3D scans. In: 2021 IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 10766\u201310776 (2021)","DOI":"10.1109\/ICCV48922.2021.01061"},{"key":"7_CR30","doi-asserted-by":"crossref","unstructured":"Fang, Y., et al.: EVA: exploring the limits of masked visual representation learning at scale. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 19358\u201319369 (2023)","DOI":"10.1109\/CVPR52729.2023.01855"},{"key":"7_CR31","doi-asserted-by":"crossref","unstructured":"Fang, Y., et al.: EVA: exploring the limits of masked visual representation learning at scale. arXiv preprint arXiv:2211.07636 (2022)","DOI":"10.1109\/CVPR52729.2023.01855"},{"key":"7_CR32","unstructured":"Fu, C., et\u00a0al.: MME: a comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394 (2023)"},{"issue":"1","key":"7_CR33","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1162\/neco.1992.4.1.1","volume":"4","author":"S Geman","year":"1992","unstructured":"Geman, S., Bienenstock, E., Doursat, R.: Neural networks and the bias\/variance dilemma. Neural Comput. 4(1), 1\u201358 (1992)","journal-title":"Neural Comput."},{"key":"7_CR34","unstructured":"Gong, T., et al.: Multimodal-GPT: a vision and language model for dialogue with humans. arXiv preprint arXiv:2305.04790 (2023)"},{"issue":"2","key":"7_CR35","doi-asserted-by":"publisher","first-page":"237","DOI":"10.1177\/030631295025002002","volume":"25","author":"C Goodwin","year":"1995","unstructured":"Goodwin, C.: Seeing in depth. Soc. Stud. Sci. 25(2), 237\u2013274 (1995)","journal-title":"Soc. Stud. 
Sci."},{"key":"7_CR36","doi-asserted-by":"crossref","unstructured":"Goyal, Y., Khot, T., Summers-Stay, D., Batra, D., Parikh, D.: Making the V in VQA matter: elevating the role of image understanding in visual question answering. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6904\u20136913 (2017)","DOI":"10.1109\/CVPR.2017.670"},{"key":"7_CR37","unstructured":"Guo, B., et al.: How close is ChatGPT to human experts? Comparison corpus, evaluation, and detection. arXiv preprint arXiv:2301.07597 (2023)"},{"key":"7_CR38","doi-asserted-by":"crossref","unstructured":"Guo, J., et al.: From images to textual prompts: zero-shot visual question answering with frozen large language models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10867\u201310877 (2023)","DOI":"10.1109\/CVPR52729.2023.01046"},{"key":"7_CR39","doi-asserted-by":"crossref","unstructured":"Gurari, D., et al.: VizWiz grand challenge: answering visual questions from blind people. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3608\u20133617 (2018)","DOI":"10.1109\/CVPR.2018.00380"},{"key":"7_CR40","unstructured":"Han, J., et\u00a0al.: ImageBind-LLM: multi-modality instruction tuning. arXiv preprint arXiv:2309.03905 (2023)"},{"key":"7_CR41","doi-asserted-by":"crossref","unstructured":"He, K., Chen, X., Xie, S., Li, Y., Doll\u2019ar, P., Girshick, R.B.: Masked autoencoders are scalable vision learners. In: 2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 15979\u201315988 (2021)","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"7_CR42","unstructured":"Heek, J., et al.: Flax: a neural network library and ecosystem for JAX (2020). http:\/\/github.com\/google\/flax"},{"key":"7_CR43","doi-asserted-by":"crossref","unstructured":"Herzig, R., et al.: PromptonomyViT: multi-task prompt learning improves video transformers using synthetic scene data. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 6803\u20136815 (2024)","DOI":"10.1109\/WACV57701.2024.00666"},{"key":"7_CR44","unstructured":"Hoffmann, J., et\u00a0al.: Training compute-optimal large language models. arXiv preprint arXiv:2203.15556 (2022)"},{"key":"7_CR45","volume-title":"Binocular vision and stereopsis","author":"IP Howard","year":"1995","unstructured":"Howard, I.P., Rogers, B.J.: Binocular vision and stereopsis. Oxford University Press, USA (1995)"},{"key":"7_CR46","unstructured":"Hu, J.E., et al.: LoRA: low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685 (2021). https:\/\/api.semanticscholar.org\/CorpusID:235458009"},{"key":"7_CR47","doi-asserted-by":"crossref","unstructured":"Huang, W., Liu, H., Guo, M., Gong, N.Z.: Visual hallucinations of multi-modal large language models. arXiv preprint arXiv:2402.14683 (2024)","DOI":"10.18653\/v1\/2024.findings-acl.573"},{"key":"7_CR48","doi-asserted-by":"crossref","unstructured":"Hudson, D.A., Manning, C.D.: GQA: a new dataset for real-world visual reasoning and compositional question answering. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6700\u20136709 (2019)","DOI":"10.1109\/CVPR.2019.00686"},{"key":"7_CR49","doi-asserted-by":"publisher","unstructured":"Ilharco, G., et al.: Openclip (2021). 
https:\/\/doi.org\/10.5281\/zenodo.5143773","DOI":"10.5281\/zenodo.5143773"},{"key":"7_CR50","unstructured":"Jia, C., et al.: Scaling up visual and vision-language representation learning with noisy text supervision. In: International Conference on Machine Learning, pp. 4904\u20134916. PMLR (2021)"},{"key":"7_CR51","unstructured":"Jian, Y., Gao, C., Vosoughi, S.: Bootstrapping vision-language learning with decoupled language pre-training. Adv. Neural Inf. Process. Syst. 36 (2024)"},{"key":"7_CR52","doi-asserted-by":"crossref","unstructured":"Kar, O.F., Yeo, T., Atanov, A., Zamir, A.: 3D common corruptions and data augmentation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 18963\u201318974 (2022)","DOI":"10.1109\/CVPR52688.2022.01839"},{"key":"7_CR53","doi-asserted-by":"crossref","unstructured":"Karpathy, A., Fei-Fei, L.: Deep visual-semantic alignments for generating image descriptions. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3128\u20133137 (2015)","DOI":"10.1109\/CVPR.2015.7298932"},{"key":"7_CR54","unstructured":"Kirillov, A., et\u00a0al.: Segment anything. arXiv preprint arXiv:2304.02643 (2023)"},{"key":"7_CR55","doi-asserted-by":"crossref","unstructured":"Korbar, B., Xian, Y., Tonioni, A., Zisserman, A., Tombari, F.: Text-conditioned resampler for long form video understanding. arXiv preprint arXiv:2312.11897 (2023)","DOI":"10.1007\/978-3-031-73016-0_16"},{"key":"7_CR56","unstructured":"Li, J., Li, D., Savarese, S., Hoi, S.: BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597 (2023)"},{"key":"7_CR57","unstructured":"Li, J., Li, D., Xiong, C., Hoi, S.C.H.: BLIP: bootstrapping language-image pre-training for unified vision-language understanding and generation. In: International Conference on Machine Learning (2022)"},{"key":"7_CR58","first-page":"9694","volume":"34","author":"J Li","year":"2021","unstructured":"Li, J., Selvaraju, R., Gotmare, A., Joty, S., Xiong, C., Hoi, S.C.H.: Align before fuse: vision and language representation learning with momentum distillation. Adv. Neural. Inf. Process. Syst. 34, 9694\u20139705 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"7_CR59","doi-asserted-by":"crossref","unstructured":"Li, Y., Du, Y., Zhou, K., Wang, J., Zhao, W.X., Wen, J.R.: Evaluating object hallucination in large vision-language models. arXiv preprint arXiv:2305.10355 (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.20"},{"key":"7_CR60","unstructured":"Lin, J., et al.: VILA: on pre-training for visual language models. arXiv preprint arXiv:2312.07533 (2023)"},{"key":"7_CR61","doi-asserted-by":"crossref","unstructured":"Lin, Z., et\u00a0al.: SPHINX: the joint mixing of weights, tasks, and visual embeddings for multi-modal large language models. arXiv preprint arXiv:2311.07575 (2023)","DOI":"10.1007\/978-3-031-73033-7_3"},{"key":"7_CR62","unstructured":"Liu, F., Lin, K., Li, L., Wang, J., Yacoob, Y., Wang, L.: Mitigating hallucination in large multi-modal models via robust instruction tuning. In: The Twelfth International Conference on Learning Representations (2023)"},{"key":"7_CR63","unstructured":"Liu, H., et al.: A survey on hallucination in large vision-language models. arXiv preprint arXiv:2402.00253 (2024)"},{"key":"7_CR64","doi-asserted-by":"crossref","unstructured":"Liu, H., Li, C., Li, Y., Lee, Y.J.: Improved baselines with visual instruction tuning. 
arXiv preprint arXiv:2310.03744 (2023)","DOI":"10.1109\/CVPR52733.2024.02484"},{"key":"7_CR65","unstructured":"Liu, H., et al.: LLaVA-NeXT: improved reasoning, OCR, and world knowledge (2024)"},{"key":"7_CR66","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning. arXiv preprint arXiv:2304.08485 (2023)"},{"key":"7_CR67","unstructured":"Liu, S., Fan, L.J., Johns, E., Yu, Z., Xiao, C., Anandkumar, A.: Prismer: a vision-language model with an ensemble of experts. arXiv preprint arXiv:2303.02506 (2023)"},{"key":"7_CR68","doi-asserted-by":"crossref","unstructured":"Lu, J., et al.: Unified-IO 2: scaling autoregressive multimodal models with vision, language, audio, and action. arXiv preprint arXiv:2312.17172 (2023)","DOI":"10.1109\/CVPR52733.2024.02497"},{"key":"7_CR69","unstructured":"Lu, J., Clark, C., Zellers, R., Mottaghi, R., Kembhavi, A.: Unified-IO: a unified model for vision, language, and multi-modal tasks. In: The Eleventh International Conference on Learning Representations (2023)"},{"key":"7_CR70","doi-asserted-by":"crossref","unstructured":"Marino, K., Rastegari, M., Farhadi, A., Mottaghi, R.: Ok-VQA: a visual question answering benchmark requiring external knowledge. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3195\u20133204 (2019)","DOI":"10.1109\/CVPR.2019.00331"},{"key":"7_CR71","unstructured":"McKinzie, B., et\u00a0al.: MM1: methods, analysis and insights from multimodal LLM pre-training. arXiv preprint arXiv:2403.09611 (2024)"},{"key":"7_CR72","unstructured":"Mizrahi, D., et al.: 4M: massively multimodal masked modeling. In: Advances in Neural Information Processing Systems (2023)"},{"key":"7_CR73","doi-asserted-by":"crossref","unstructured":"Naeem, M.F., Xian, Y., Zhai, X., Hoyer, L., Van\u00a0Gool, L., Tombari, F.: SILC: improving vision language pretraining with self-distillation. arXiv preprint arXiv:2310.13355 (2023)","DOI":"10.1007\/978-3-031-72664-4_3"},{"key":"7_CR74","unstructured":"Oquab, M., et al.: DINOv2: learning robust visual features without supervision. arXiv preprint arXiv:2304.07193 (2023)"},{"key":"7_CR75","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning (2021)"},{"issue":"1","key":"7_CR76","first-page":"5485","volume":"21","author":"C Raffel","year":"2020","unstructured":"Raffel, C., et al.: Exploring the limits of transfer learning with a unified text-to-text transformer. J.Mach. Learn. Res. 21(1), 5485\u20135551 (2020)","journal-title":"J.Mach. Learn. Res."},{"key":"7_CR77","first-page":"25278","volume":"35","author":"C Schuhmann","year":"2022","unstructured":"Schuhmann, C., et al.: LAION-5B: an open large-scale dataset for training next generation image-text models. Adv. Neural. Inf. Process. Syst. 35, 25278\u201325294 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"7_CR78","doi-asserted-by":"crossref","unstructured":"Shen, Y., et al.: ChatGPT and other large language models are double-edged swords (2023)","DOI":"10.1148\/radiol.230163"},{"key":"7_CR79","unstructured":"Team, G., et\u00a0al.: Gemini: a family of highly capable multimodal models. 
arXiv preprint arXiv:2312.11805 (2023)"},{"key":"7_CR80","doi-asserted-by":"crossref","unstructured":"Thorp, H.H.: ChatGPT is fun, but not an author (2023)","DOI":"10.1126\/science.adg7879"},{"key":"7_CR81","doi-asserted-by":"crossref","unstructured":"Thrush, T., et al.: WinoGround: probing vision and language models for visio-linguistic compositionality. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5238\u20135248 (2022)","DOI":"10.1109\/CVPR52688.2022.00517"},{"key":"7_CR82","doi-asserted-by":"crossref","unstructured":"Tiong, A.M.H., Li, J., Li, B., Savarese, S., Hoi, S.C.: Plug-and-play VQA: zero-shot VQA by conjoining large pretrained models with zero training. arXiv preprint arXiv:2210.08773 (2022)","DOI":"10.18653\/v1\/2022.findings-emnlp.67"},{"key":"7_CR83","doi-asserted-by":"crossref","unstructured":"Tong, S., Liu, Z., Zhai, Y., Ma, Y., LeCun, Y., Xie, S.: Eyes wide shut? Exploring the visual shortcomings of multimodal LLMs. arXiv preprint arXiv:2401.06209 (2024)","DOI":"10.1109\/CVPR52733.2024.00914"},{"key":"7_CR84","unstructured":"Touvron, H., et\u00a0al.: Llama: open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)"},{"key":"7_CR85","doi-asserted-by":"crossref","unstructured":"Vedantam, R., Lawrence\u00a0Zitnick, C., Parikh, D.: CIDEr: consensus-based image description evaluation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4566\u20134575 (2015)","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"7_CR86","unstructured":"Wang, J., et al.: GIT: a generative image-to-text transformer for vision and language. arXiv preprint arXiv:2205.14100 (2022)"},{"key":"7_CR87","unstructured":"Wang, P., et al.: OFA: unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework. In: International Conference on Machine Learning, pp. 23318\u201323340. PMLR (2022)"},{"key":"7_CR88","unstructured":"Wang, W., et\u00a0al.: CogVLM: visual expert for pretrained language models. arXiv preprint arXiv:2311.03079 (2023)"},{"key":"7_CR89","doi-asserted-by":"crossref","unstructured":"Wang, W., et al.: Image as a foreign language: BEiT pretraining for all vision and vision-language tasks. arXiv preprint arXiv:2208.10442 (2022)","DOI":"10.1109\/CVPR52729.2023.01838"},{"key":"7_CR90","unstructured":"Wang, Z., Yu, J., Yu, A.W., Dai, Z., Tsvetkov, Y., Cao, Y.: SimVLM: simple visual language model pretraining with weak supervision. arXiv preprint arXiv:2108.10904 (2021)"},{"key":"7_CR91","unstructured":"Xu, J., et al.: Pixel aligned language models. arXiv preprint arXiv: 2312.09237 (2023)"},{"key":"7_CR92","unstructured":"Yadav, D., et al.: EvalAI: towards better evaluation systems for AI agents (2019)"},{"key":"7_CR93","unstructured":"Yao, L., et al.: FILIP: fine-grained interactive language-image pre-training. arXiv preprint arXiv:2111.07783 (2021)"},{"key":"7_CR94","unstructured":"Ye, Q., et\u00a0al.: mPLUG-Owl: modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178 (2023)"},{"key":"7_CR95","unstructured":"Yu, J., Wang, Z., Vasudevan, V., Yeung, L., Seyedhosseini, M., Wu, Y.: CoCa: contrastive captioners are image-text foundation models. Transactions on Machine Learning Research (2022)"},{"key":"7_CR96","unstructured":"Yu, S., Cho, J., Yadav, P., Bansal, M.: Self-chained image-language model for video localization and question answering. 
arXiv preprint arXiv:2305.06988 (2023)"},{"key":"7_CR97","unstructured":"Yuan, L., et\u00a0al.: Florence: a new foundation model for computer vision. arXiv preprint arXiv:2111.11432 (2021)"},{"key":"7_CR98","doi-asserted-by":"crossref","unstructured":"Zhai, X., Kolesnikov, A., Houlsby, N., Beyer, L.: Scaling vision transformers. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12104\u201312113 (2022)","DOI":"10.1109\/CVPR52688.2022.01179"},{"key":"7_CR99","doi-asserted-by":"crossref","unstructured":"Zhai, X., Mustafa, B., Kolesnikov, A., Beyer, L.: Sigmoid loss for language image pre-training. arXiv preprint arXiv:2303.15343 (2023)","DOI":"10.1109\/ICCV51070.2023.01100"},{"key":"7_CR100","doi-asserted-by":"crossref","unstructured":"Zhai, X., et al.: LiT: zero-shot transfer with locked-image text tuning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 18123\u201318133 (2022)","DOI":"10.1109\/CVPR52688.2022.01759"},{"key":"7_CR101","doi-asserted-by":"crossref","unstructured":"Zhang, D., et al.: MM-LLMs: recent advances in multimodal large language models. arXiv preprint arXiv:2401.13601 (2024)","DOI":"10.18653\/v1\/2024.findings-acl.738"},{"key":"7_CR102","unstructured":"Zhang, S., et\u00a0al.: OPT: open pre-trained transformer language models. arXiv preprint arXiv:2205.01068 (2022)"},{"key":"7_CR103","unstructured":"Zhao, Y., et al.: On evaluating adversarial robustness of large vision-language models. Adv. Neural Inf. Process. Syst. 36 (2024)"},{"key":"7_CR104","unstructured":"Zhou, J., et al.: iBoT: image BERT pre-training with online tokenizer. In: International Conference on Learning Representations (2022)"},{"key":"7_CR105","unstructured":"Zhu, D., Chen, J., Shen, X., Li, X., Elhoseiny, M.: MiniGPT-4: enhancing vision-language understanding with advanced large language models. 
arXiv preprint arXiv:2304.10592 (2023)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72640-8_7","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,30]],"date-time":"2024-11-30T10:31:11Z","timestamp":1732962671000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72640-8_7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,29]]},"ISBN":["9783031726392","9783031726408"],"references-count":105,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72640-8_7","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,10,29]]},"assertion":[{"value":"29 October 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
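
For reference, below is a minimal sketch of how a record like the one above can be retrieved and read programmatically. It assumes network access and uses the public Crossref REST API endpoint https://api.crossref.org/works/{doi}; the response envelope (status / message-type / message) and the field names used here are taken directly from the record shown above, but error handling, rate limiting, and Crossref's "polite pool" headers are left out.

    # Sketch: fetch the Crossref "work" record for this chapter and print a few fields.
    # Assumes the public Crossref REST API (api.crossref.org) is reachable.
    import json
    import urllib.request

    DOI = "10.1007/978-3-031-72640-8_7"
    url = f"https://api.crossref.org/works/{DOI}"

    with urllib.request.urlopen(url) as resp:
        record = json.load(resp)            # same envelope as the record above

    msg = record["message"]                 # the "work" payload
    print(msg["title"][0])                  # chapter title (BRAVE: ...)
    print(msg["DOI"], msg["page"])          # DOI and page range (113-132)
    print(msg["references-count"])          # 105
    for a in msg["author"]:
        print(a.get("given", ""), a["family"])   # author list
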