{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T17:40:07Z","timestamp":1777657207267,"version":"3.51.4"},"publisher-location":"Cham","reference-count":63,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031730207","type":"print"},{"value":"9783031730214","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,21]],"date-time":"2024-11-21T00:00:00Z","timestamp":1732147200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,21]],"date-time":"2024-11-21T00:00:00Z","timestamp":1732147200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-73021-4_23","type":"book-chapter","created":{"date-parts":[[2024,11,20]],"date-time":"2024-11-20T09:19:49Z","timestamp":1732094389000},"page":"387-404","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":24,"title":["UniIR: Training and\u00a0Benchmarking Universal Multimodal Information Retrievers"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0006-0835-784X","authenticated-orcid":false,"given":"Cong","family":"Wei","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0003-5667-7803","authenticated-orcid":false,"given":"Yang","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Haonan","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Hexiang","family":"Hu","sequence":"additional","affiliation":[]},{"given":"Ge","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Jie","family":"Fu","sequence":"additional","affiliation":[]},{"given":"Alan","family":"Ritter","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0002-0947-8388","authenticated-orcid":false,"given":"Wenhu","family":"Chen","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,21]]},"reference":[{"key":"23_CR1","unstructured":"Achiam, J., et al.: GPT-4 technical report. arXiv preprint arXiv:2303.08774 (2023)"},{"key":"23_CR2","doi-asserted-by":"crossref","unstructured":"Asai, A., Min, S., Zhong, Z., Chen, D.: Retrieval-based language models and applications. In: Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 6: Tutorial Abstracts), pp. 41\u201346 (2023)","DOI":"10.18653\/v1\/2023.acl-tutorials.6"},{"key":"23_CR3","doi-asserted-by":"crossref","unstructured":"Asai, A., et al.: Task-aware retrieval with instructions. Findings of ACL (2022)","DOI":"10.18653\/v1\/2023.findings-acl.225"},{"key":"23_CR4","unstructured":"Blattmann, A., Rombach, R., Oktay, K., M\u00fcller, J., Ommer, B.: Semi-parametric neural image synthesis. In: Oh, A.H., Agarwal, A., Belgrave, D., Cho, K. (eds.) Advances in Neural Information Processing Systems (2022)"},{"key":"23_CR5","doi-asserted-by":"crossref","unstructured":"Brooks, T., Holynski, A., Efros, A.A.: Instructpix2pix: learning to follow image editing instructions. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
18392\u201318402 (2023)","DOI":"10.1109\/CVPR52729.2023.01764"},{"key":"23_CR6","unstructured":"Brooks, T., et al.: Video generation models as world simulators (2024). https:\/\/openai.com\/research\/video-generation-models-as-world-simulators"},{"key":"23_CR7","doi-asserted-by":"crossref","unstructured":"Chang, Y., Narang, M., Suzuki, H., Cao, G., Gao, J., Bisk, Y.: Webqa: multihop and multimodal QA. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 16495\u201316504 (2022)","DOI":"10.1109\/CVPR52688.2022.01600"},{"key":"23_CR8","doi-asserted-by":"crossref","unstructured":"Changpinyo, S., Pont-Tuset, J., Ferrari, V., Soricut, R.: Telling the what while pointing to the where: multimodal queries for image retrieval. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 12136\u201312146 (2021)","DOI":"10.1109\/ICCV48922.2021.01192"},{"key":"23_CR9","unstructured":"Chen, W., Hu, H., Saharia, C., Cohen, W.W.: Re-imagen: retrieval-augmented text-to-image generator. In: The International Conference on Learning Representations (2022)"},{"key":"23_CR10","doi-asserted-by":"crossref","unstructured":"Chen, Y., et al.: Can pre-trained vision and language models answer visual information-seeking questions? In: Proceedings of Conference on Empirical Methods in Natural Language Processing (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.925"},{"key":"23_CR11","doi-asserted-by":"crossref","unstructured":"Chen, Y.C., et al.: Uniter: learning universal image-text representations (2019)","DOI":"10.1007\/978-3-030-58577-8_7"},{"key":"23_CR12","unstructured":"Chowdhery, A., et al.: Palm: scaling language modeling with pathways. arXiv preprint arXiv:2204.02311 (2022)"},{"key":"23_CR13","unstructured":"Chung, H.W., et al.: Scaling instruction-finetuned language models. arXiv preprint arXiv:2210.11416 (2022)"},{"key":"23_CR14","unstructured":"Dai, W., et al.: Instructblip: towards general-purpose vision-language models with instruction tuning. In: Advances in Neural Information Processing Systems (2023)"},{"key":"23_CR15","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: transformers for image recognition at scale. In: The International Conference on Learning Representations (2020)"},{"key":"23_CR16","unstructured":"Fu, S., et al.: Dreamsim: learning new dimensions of human visual similarity using synthetic data. In: Advances in Neural Information Processing Systems (2023)"},{"key":"23_CR17","unstructured":"Ge, Y., et al.: Making llama see and draw with seed tokenizer. arXiv preprint arXiv:2310.01218 (2023)"},{"key":"23_CR18","doi-asserted-by":"crossref","unstructured":"Girdhar, R., et al.: Imagebind: one embedding space to bind them all. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 15180\u201315190 (2023)","DOI":"10.1109\/CVPR52729.2023.01457"},{"key":"23_CR19","doi-asserted-by":"crossref","unstructured":"Han, X., et al.: Automatic spatially-aware fashion concept discovery. In: Proceedings of the IEEE International Conference on Computer Vision (2017)","DOI":"10.1109\/ICCV.2017.163"},{"key":"23_CR20","doi-asserted-by":"crossref","unstructured":"Hu, H., et al.: Instruct-imagen: image generation with multi-modal instruction. arXiv preprint arXiv:2401.01952 (2024)","DOI":"10.1109\/CVPR52733.2024.00455"},{"key":"23_CR21","doi-asserted-by":"crossref","unstructured":"Hu, H., et al.: Open-domain visual entity recognition: towards recognizing millions of wikipedia entities. 
In: Proceedings of the IEEE International Conference on Computer Vision (2023)","DOI":"10.1109\/ICCV51070.2023.01108"},{"key":"23_CR22","doi-asserted-by":"crossref","unstructured":"Jain, A., et al.: Mural: multimodal, multitask retrieval across languages. Findings of the Association for Computational Linguistics: EMNLP (2021)","DOI":"10.18653\/v1\/2021.findings-emnlp.293"},{"key":"23_CR23","unstructured":"Jia, C., et al.: Scaling up visual and vision-language representation learning with noisy text supervision. In: International Conference on Machine Learning, pp. 4904\u20134916. PMLR (2021)"},{"issue":"3","key":"23_CR24","doi-asserted-by":"publisher","first-page":"535","DOI":"10.1109\/TBDATA.2019.2921572","volume":"7","author":"J Johnson","year":"2019","unstructured":"Johnson, J., Douze, M., J\u00e9gou, H.: Billion-scale similarity search with GPUs. IEEE Trans. Big Data 7(3), 535\u2013547 (2019)","journal-title":"IEEE Trans. Big Data"},{"key":"23_CR25","doi-asserted-by":"crossref","unstructured":"Karpathy, A., Fei-Fei, L.: Deep visual-semantic alignments for generating image descriptions. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3128\u20133137 (2015)","DOI":"10.1109\/CVPR.2015.7298932"},{"key":"23_CR26","unstructured":"Kim, W., Son, B., Kim, I.: VILT: vision-and-language transformer without convolution or region supervision. In: International Conference on Machine Learning, pp. 5583\u20135594. PMLR (2021)"},{"key":"23_CR27","unstructured":"Li, J., Li, D., Savarese, S., Hoi, S.: Blip-2: bootstrapping language-image pre-training with frozen image encoders and large language models. In: International Conference on Machine Learning (2023)"},{"key":"23_CR28","unstructured":"Li, J., Li, D., Xiong, C., Hoi, S.: Blip: bootstrapping language-image pre-training for unified vision-language understanding and generation. In: International Conference on Machine Learning, pp. 12888\u201312900. PMLR (2022)"},{"key":"23_CR29","first-page":"9694","volume":"34","author":"J Li","year":"2021","unstructured":"Li, J., Selvaraju, R., Gotmare, A., Joty, S., Xiong, C., Hoi, S.C.H.: Align before fuse: vision and language representation learning with momentum distillation. Adv. Neural. Inf. Process. Syst. 34, 9694\u20139705 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"23_CR30","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"T-Y Lin","year":"2014","unstructured":"Lin, T.-Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"23_CR31","unstructured":"Lin, X.V., et al.: RA-DIT: retrieval-augmented dual instruction tuning. arXiv preprint arXiv:2310.01352 (2023)"},{"key":"23_CR32","doi-asserted-by":"crossref","unstructured":"Liu, F., Wang, Y., Wang, T., Ordonez, V.: Visual news: benchmark and challenges in news image captioning. In: Proceedings of the Conference on Empirical Methods in Natural Language Processing (2021)","DOI":"10.18653\/v1\/2021.emnlp-main.542"},{"key":"23_CR33","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning. 
In: Advances in Neural Information Processing Systems (2023)"},{"key":"23_CR34","doi-asserted-by":"crossref","unstructured":"Liu, S., Feng, W., Chen, W., Wang, W.Y.: EDIS: entity-driven image search over multimodal web content. In: Proceedings of Conference on Empirical Methods in Natural Language Processing (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.297"},{"key":"23_CR35","unstructured":"Liu, Z., Xiong, C., Lv, Y., Liu, Z., Yu, G.: Universal vision-language dense retrieval: learning a unified representation space for multi-modal retrieval. In: The Eleventh International Conference on Learning Representations (2022)"},{"key":"23_CR36","doi-asserted-by":"crossref","unstructured":"Liu, Z., Rodriguez-Opazo, C., Teney, D., Gould, S.: Image retrieval on real-life images with pre-trained vision-and-language models. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 2125\u20132134 (2021)","DOI":"10.1109\/ICCV48922.2021.00213"},{"key":"23_CR37","doi-asserted-by":"crossref","unstructured":"Luo, M., Fang, Z., Gokhale, T., Yang, Y., Baral, C.: End-to-end knowledge retrieval with multi-modal queries. In: Annual Meeting of the Association for Computational Linguistics (2023)","DOI":"10.18653\/v1\/2023.acl-long.478"},{"key":"23_CR38","doi-asserted-by":"crossref","unstructured":"Mishra, S., Khashabi, D., Baral, C., Hajishirzi, H.: Cross-task generalization via natural language crowdsourcing instructions. In: Annual Meeting of the Association for Computational Linguistics (2021)","DOI":"10.18653\/v1\/2022.acl-long.244"},{"key":"23_CR39","unstructured":"OpenAI: GPT-4 technical report. arXiv abs\/2303.08774 (2023). https:\/\/api.semanticscholar.org\/CorpusID:257532815"},{"key":"23_CR40","first-page":"27730","volume":"35","author":"L Ouyang","year":"2022","unstructured":"Ouyang, L., et al.: Training language models to follow instructions with human feedback. Adv. Neural. Inf. Process. Syst. 35, 27730\u201327744 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"23_CR41","unstructured":"Pan, X., Dong, L., Huang, S., Peng, Z., Chen, W., Wei, F.: Kosmos-g: generating images in context with multimodal large language models. arXiv preprint arXiv:2310.02992 (2023)"},{"key":"23_CR42","doi-asserted-by":"crossref","unstructured":"Plummer, B.A., Wang, L., Cervantes, C.M., Caicedo, J.C., Hockenmaier, J., Lazebnik, S.: Flickr30k entities: collecting region-to-phrase correspondences for richer image-to-sentence models. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2641\u20132649 (2015)","DOI":"10.1109\/ICCV.2015.303"},{"key":"23_CR43","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763. PMLR (2021)"},{"key":"23_CR44","doi-asserted-by":"publisher","first-page":"1316","DOI":"10.1162\/tacl_a_00605","volume":"11","author":"O Ram","year":"2023","unstructured":"Ram, O., et al.: In-context retrieval-augmented language models. Trans. Assoc. Comput. Linguist. 11, 1316\u20131331 (2023)","journal-title":"Trans. Assoc. Comput. Linguist."},{"key":"23_CR45","unstructured":"Sheynin, S., et al.: kNN-diffusion: image generation via large-scale retrieval. In: The Eleventh International Conference on Learning Representations (2023)"},{"issue":"4","key":"23_CR46","first-page":"35","volume":"24","author":"A Singhal","year":"2001","unstructured":"Singhal, A., et al.: Modern information retrieval: a brief overview. IEEE Data Eng. Bull. 
24(4), 35\u201343 (2001)","journal-title":"IEEE Data Eng. Bull."},{"key":"23_CR47","doi-asserted-by":"crossref","unstructured":"Su, H., et al.: One embedder, any task: instruction-finetuned text embeddings. In: Rogers, A., Boyd-Graber, J., Okazaki, N. (eds.) Findings of the Association for Computational Linguistics: ACL 2023 (2023)","DOI":"10.18653\/v1\/2023.findings-acl.71"},{"key":"23_CR48","unstructured":"Sun, Q., et al.: Generative multimodal models are in-context learners. arXiv preprint arXiv:2312.13286 (2023)"},{"key":"23_CR49","doi-asserted-by":"crossref","unstructured":"Tang, Z., Yang, Z., Khademi, M., Liu, Y., Zhu, C., Bansal, M.: Codi-2: in-context, interleaved, and interactive any-to-any generation. arXiv preprint arXiv:2311.18775 (2023)","DOI":"10.1109\/CVPR52733.2024.02589"},{"key":"23_CR50","unstructured":"Tang, Z., Yang, Z., Zhu, C., Zeng, M., Bansal, M.: Any-to-any generation via composable diffusion. In: Advances in Neural Information Processing Systems, vol. 36 (2024)"},{"key":"23_CR51","unstructured":"Team, G., et al.: Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805 (2023)"},{"key":"23_CR52","unstructured":"Thakur, N., Reimers, N., R\u00fcckl\u00e9, A., Srivastava, A., Gurevych, I.: BEIR: a heterogenous benchmark for zero-shot evaluation of information retrieval models. In: Advances in Neural Information Processing Systems (2021)"},{"key":"23_CR53","unstructured":"Touvron, H., et al.: Llama: open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)"},{"key":"23_CR54","unstructured":"Wang, B., et al.: Instructretro: instruction tuning post retrieval-augmented pretraining. arXiv preprint arXiv:2310.07713 (2023)"},{"key":"23_CR55","unstructured":"Wei, J., et al.: Finetuned language models are zero-shot learners. In: The International Conference on Learning Representations (2021)"},{"key":"23_CR56","doi-asserted-by":"crossref","unstructured":"Wu, H., et al.: Fashion IQ: a new dataset towards retrieving images by natural language feedback. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 11307\u201311317 (2021)","DOI":"10.1109\/CVPR46437.2021.01115"},{"key":"23_CR57","unstructured":"Wu, S., Fei, H., Qu, L., Ji, W., Chua, T.S.: Next-GPT: any-to-any multimodal LLM. arXiv preprint arXiv:2309.05519 (2023)"},{"key":"23_CR58","doi-asserted-by":"crossref","unstructured":"Xu, Z., Shen, Y., Huang, L.: Multiinstruct: improving multi-modal zero-shot learning via instruction tuning. In: Annual Meeting of the Association for Computational Linguistics (2023)","DOI":"10.18653\/v1\/2023.acl-long.641"},{"key":"23_CR59","unstructured":"Yang, Z., Li, L., Lin, K., Wang, J., Lin, C.C., Liu, Z., Wang, L.: The dawn of LMMs: preliminary explorations with GPT-4V (ision). arXiv preprint arXiv:2309.17421, vol. 9, no. 1, p. 1 (2023)"},{"key":"23_CR60","unstructured":"Yasunaga, M., et al.: Retrieval-augmented multimodal language modeling (2023)"},{"key":"23_CR61","unstructured":"Yu, L., et al.: Scaling autoregressive multi-modal models: pretraining and instruction tuning. arXiv preprint arXiv:2309.02591 (2023)"},{"key":"23_CR62","doi-asserted-by":"crossref","unstructured":"Zhai, X., Mustafa, B., Kolesnikov, A., Beyer, L.: Sigmoid loss for language image pre-training. 
In: Proceedings of the IEEE International Conference on Computer Vision (2023)","DOI":"10.1109\/ICCV51070.2023.01100"},{"key":"23_CR63","unstructured":"Zhang, K., Mo, L., Chen, W., Sun, H., Su, Y.: Magicbrush: a manually annotated dataset for instruction-guided image editing. In: Advances in neural information processing systems (2023)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-73021-4_23","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,20]],"date-time":"2024-11-20T09:48:18Z","timestamp":1732096098000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-73021-4_23"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,21]]},"ISBN":["9783031730207","9783031730214"],"references-count":63,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-73021-4_23","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,21]]},"assertion":[{"value":"21 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
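
The record above appears to be a Crossref work record: its envelope ("status": "ok", "message-type": "work", payload under "message") matches what the public Crossref REST API returns from its /works/{DOI} route. A minimal sketch of re-fetching and parsing the same record follows, assuming network access and the third-party requests package; the field names used (title, author, references-count) are taken directly from the record above, but treat the snippet as an illustration, not part of the record itself.

import requests

# Sketch: fetch this work record from the public Crossref REST API.
# Assumes the requests library is installed and the DOI resolves there.
DOI = "10.1007/978-3-031-73021-4_23"
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # payload sits under the "message" key, as above

# Extract the fields a citation formatter typically needs.
title = work["title"][0]  # "title" is a list in Crossref records
authors = ", ".join(
    f'{a.get("given", "")} {a["family"]}'.strip() for a in work["author"]
)
print(title)
print(authors)
print("references:", work["references-count"])

Running this should print the chapter title, the eight authors from Cong Wei through Wenhu Chen, and a reference count of 63, matching the deposited metadata.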