{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T08:48:40Z","timestamp":1742978920665,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":32,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819620609"},{"type":"electronic","value":"9789819620616"}],"license":[{"start":{"date-parts":[[2024,12,31]],"date-time":"2024-12-31T00:00:00Z","timestamp":1735603200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,12,31]],"date-time":"2024-12-31T00:00:00Z","timestamp":1735603200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-981-96-2061-6_31","type":"book-chapter","created":{"date-parts":[[2024,12,30]],"date-time":"2024-12-30T05:46:51Z","timestamp":1735537611000},"page":"424-437","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["MM-CARP: Multimodal Model with\u00a0Cross-Modal Retrieval-Augmented and\u00a0Visual Region Perception"],"prefix":"10.1007","author":[{"given":"Junhao","family":"Guo","sequence":"first","affiliation":[]},{"given":"Chenhan","family":"Fu","sequence":"additional","affiliation":[]},{"given":"Guoming","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Rongxing","family":"Lu","sequence":"additional","affiliation":[]},{"given":"Dong","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Siliang","family":"Tang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,12,31]]},"reference":[{"key":"31_CR1","unstructured":"Borgeaud, S., et\u00a0al.: Improving language models by retrieving from trillions of tokens. In: International Conference on Machine Learning, pp. 2206\u20132240 (2022)"},{"key":"31_CR2","unstructured":"Chen, J., et al.: Minigpt-v2: large language model as a unified interface for vision-language multi-task learning. arXiv preprint arXiv:2310.09478 (2023)"},{"key":"31_CR3","first-page":"22243","volume":"33","author":"T Chen","year":"2020","unstructured":"Chen, T., Kornblith, S., Swersky, K., Norouzi, M., Hinton, G.E.: Big self-supervised models are strong semi-supervised learners. Adv. Neural. Inf. Process. Syst. 33, 22243\u201322255 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"31_CR4","doi-asserted-by":"crossref","unstructured":"Chen, W., Hu, H., Chen, X., Verga, P., Cohen, W.W.: MURAG: multimodal retrieval-augmented generator for open question answering over images and text (2022)","DOI":"10.18653\/v1\/2022.emnlp-main.375"},{"key":"31_CR5","unstructured":"Clark, P., et al.: Think you have solved question answering? Try arc, the AI2 reasoning challenge. arXiv:1803.05457v1 (2018)"},{"key":"31_CR6","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)"},{"key":"31_CR7","doi-asserted-by":"crossref","unstructured":"Gao, P., et al.: Dynamic fusion with intra-and inter-modality attention flow for visual question answering. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6639\u20136648 (2019)","DOI":"10.1109\/CVPR.2019.00680"},{"key":"31_CR8","unstructured":"Gibson, E.J.: Principles of perceptual learning and development. Appleton-Century-Crofts (1969)"},{"key":"31_CR9","unstructured":"Hilton, J., Nakano, R., Balaji, S., Schulman, J.: WebGPT: improving the factual accuracy of language models through web browsing. OpenAI Blog, December 16 (2021)"},{"issue":"2","key":"31_CR10","doi-asserted-by":"publisher","first-page":"89","DOI":"10.1016\/0010-0277(87)90026-6","volume":"26","author":"R Jackendoff","year":"1987","unstructured":"Jackendoff, R.: On beyond zebra: the relation of linguistic and visual information. Cognition 26(2), 89\u2013114 (1987)","journal-title":"Cognition"},{"key":"31_CR11","doi-asserted-by":"crossref","unstructured":"Johnson, J., et al.: Inferring and executing programs for visual reasoning. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2989\u20132998 (2017)","DOI":"10.1109\/ICCV.2017.325"},{"key":"31_CR12","unstructured":"Khandelwal, U., Levy, O., Jurafsky, D., Zettlemoyer, L., Lewis, M.: Generalization through memorization: nearest neighbor language models. In: International Conference on Learning Representations (2020)"},{"key":"31_CR13","doi-asserted-by":"crossref","unstructured":"Khashabi, D., et al.: UnifiedQA: crossing format boundaries with a single QA system. In: Findings of the Association for Computational Linguistics 2020, pp. 1896\u20131907 (2020)","DOI":"10.18653\/v1\/2020.findings-emnlp.171"},{"key":"31_CR14","unstructured":"Kim, W., Son, B., Kim, I.: VILT: vision-and-language transformer without convolution or region supervision. In: International Conference on Machine Learning, pp. 5583\u20135594 (2021)"},{"key":"31_CR15","doi-asserted-by":"crossref","unstructured":"Komeili, M., Shuster, K., Weston, J.: Internet-augmented dialogue generation. arXiv preprint arXiv:2107.07566 (2021)","DOI":"10.18653\/v1\/2022.acl-long.579"},{"key":"31_CR16","unstructured":"Lan, Z., Chen, M., Goodman, S., Gimpel, K., Sharma, P., Soricut, R.: Albert: a lite BERT for self-supervised learning of language representations. arXiv preprint arXiv:1909.11942 (2019)"},{"key":"31_CR17","unstructured":"Li, L.H., Yatskar, M., Yin, D., Hsieh, C.J., Chang, K.W.: Visualbert: a simple and performant baseline for vision and language (2019)"},{"key":"31_CR18","unstructured":"Lin, W., Chen, J., Mei, J., Coca, A., Byrne, B.: Fine-grained late-interaction multi-modal retrieval for retrieval augmented visual question answering. In: Advances in Neural Information Processing Systems, vol. 36 (2024)"},{"key":"31_CR19","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning (2023)"},{"key":"31_CR20","unstructured":"Liu, Y., et al.: Roberta: a robustly optimized BERT pretraining approach. arXiv preprint arXiv:1907.11692 (2019)"},{"key":"31_CR21","first-page":"2507","volume":"35","author":"P Lu","year":"2022","unstructured":"Lu, P., et al.: Learn to explain: multimodal reasoning via thought chains for science question answering. Adv. Neural. Inf. Process. Syst. 35, 2507\u20132521 (2022)","journal-title":"Adv. Neural. Inf. Process. 
Syst."},{"key":"31_CR22","unstructured":"Lu, P., et al.: Chameleon: plug-and-play compositional reasoning with large language models (2023)"},{"key":"31_CR23","doi-asserted-by":"crossref","unstructured":"Marino, K., Rastegari, M., Farhadi, A., Mottaghi, R.: Ok-VQA: a visual question answering benchmark requiring external knowledge. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3195\u20133204 (2019)","DOI":"10.1109\/CVPR.2019.00331"},{"issue":"5737","key":"31_CR24","doi-asserted-by":"publisher","first-page":"403","DOI":"10.1038\/282403a0","volume":"282","author":"AN Meltzoff","year":"1979","unstructured":"Meltzoff, A.N., Borton, R.W.: Intermodal matching by human neonates. Nature 282(5737), 403\u2013404 (1979)","journal-title":"Nature"},{"key":"31_CR25","unstructured":"OpenAI: Gpt-4 technical report (2023)"},{"key":"31_CR26","doi-asserted-by":"publisher","first-page":"23","DOI":"10.1038\/nature03687","volume":"435","author":"RQ Quiroga","year":"2005","unstructured":"Quiroga, R.Q., Reddy, L., Kreiman, G., Koch, C., Fried, I.: Invariant visual representation by single neurons in the human brain. Nature 435, 23 (2005)","journal-title":"Nature"},{"key":"31_CR27","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763. PMLR (2021)"},{"key":"31_CR28","unstructured":"Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., Sutskever, I.: Language models are unsupervised multitask learners (2019). openAI preprint"},{"key":"31_CR29","unstructured":"Sharifzadeh, S., et al.: Synth2: boosting visual-language models with synthetic captions and image embeddings. arXiv preprint arXiv:2403.07750 (2024)"},{"key":"31_CR30","unstructured":"Yasunaga, M., et al.: Retrieval-augmented multimodal language modeling. In: International Conference on Machine Learning, pp. 39755\u201339769. PMLR (2023)"},{"key":"31_CR31","doi-asserted-by":"crossref","unstructured":"Yuan, Z., et al.: RAMM: retrieval-augmented biomedical visual question answering with multi-modal pre-training. In: Proceedings of the 31st ACM International Conference on Multimedia, pp. 
547\u2013556 (2023)","DOI":"10.1145\/3581783.3611830"},{"key":"31_CR32","unstructured":"Zhang, Z., Zhang, A., Li, M., Zhao, H., Karypis, G., Smola, A.: Multimodal chain-of-thought reasoning in language models (2023)"}],"container-title":["Lecture Notes in Computer Science","MultiMedia Modeling"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-96-2061-6_31","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,30]],"date-time":"2024-12-30T06:07:17Z","timestamp":1735538837000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-96-2061-6_31"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,31]]},"ISBN":["9789819620609","9789819620616"],"references-count":32,"URL":"https:\/\/doi.org\/10.1007\/978-981-96-2061-6_31","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,12,31]]},"assertion":[{"value":"31 December 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MMM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Multimedia Modeling","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Nara","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Japan","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"9 January 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"11 January 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"31","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"mmm2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/mmm2025.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
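
The record above is a raw Crossref "work" response for the MM-CARP chapter (DOI 10.1007/978-981-96-2061-6_31). As a minimal sketch of how such a record can be fetched and unpacked, the following Python uses only the standard library and the public Crossref REST API endpoint (https://api.crossref.org/works/{DOI}); the field paths mirror the JSON above, while the variable names and the printed summary format are illustrative, not part of the record.

import json
import urllib.request

DOI = "10.1007/978-981-96-2061-6_31"

# The Crossref REST API returns the same envelope as shown above:
# {"status": "ok", ..., "message": {<the work record>}}.
with urllib.request.urlopen(f"https://api.crossref.org/works/{DOI}") as resp:
    work = json.load(resp)["message"]

# Crossref deposits "title" and "container-title" as lists of strings.
title = work["title"][0]
container = work.get("container-title") or [""]
authors = ", ".join(
    f'{a.get("given", "")} {a.get("family", "")}'.strip()
    for a in work.get("author", [])
)

print(f'{authors}: "{title}".')
print(f'In: {container[-1]}, pp. {work.get("page", "n/a")}')
print(f'References deposited: {work.get("references-count", 0)}')

For the record above, this should print the six authors and the chapter title, followed by "In: MultiMedia Modeling, pp. 424-437" and a deposited reference count of 32, matching the "reference" array.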