{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,29]],"date-time":"2026-01-29T01:26:48Z","timestamp":1769650008719,"version":"3.49.0"},"publisher-location":"Singapore","reference-count":21,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819530601","type":"print"},{"value":"9789819530618","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,11,13]],"date-time":"2025-11-13T00:00:00Z","timestamp":1762992000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,11,13]],"date-time":"2025-11-13T00:00:00Z","timestamp":1762992000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-981-95-3061-8_23","type":"book-chapter","created":{"date-parts":[[2025,11,12]],"date-time":"2025-11-12T05:02:30Z","timestamp":1762923750000},"page":"219-227","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["RAG with\u00a0Visual Alert: Boosting Multimodal Language Models for\u00a0Enhanced Visual Question Answering"],"prefix":"10.1007","author":[{"given":"Hongze","family":"Ou","sequence":"first","affiliation":[]},{"given":"Xiaoyu","family":"Liang","sequence":"additional","affiliation":[]},{"given":"Lianrui","family":"Mu","sequence":"additional","affiliation":[]},{"given":"Haoji","family":"Hu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,11,13]]},"reference":[{"issue":"1","key":"23_CR1","first-page":"4","volume":"123","author":"S Antol","year":"2015","unstructured":"Antol, S., Agrawal, A., Lu, J., et al.: VQA: visual question answering. Int. J. Comput. Vision 123(1), 4\u201331 (2015)","journal-title":"Int. J. Comput. Vision"},{"key":"23_CR2","doi-asserted-by":"crossref","unstructured":"Chen, L., et al.: Sharegpt4v: improving large multi-modal models with better captions (2023)","DOI":"10.1007\/978-3-031-72643-9_22"},{"key":"23_CR3","unstructured":"Chen, Z., Zhou, Q., Shen, Y., et\u00a0al.: See, think, confirm: interactive prompting between vision and language models for knowledge-based visual reasoning. arXiv preprint arXiv:2301.05226 (2023)"},{"key":"23_CR4","unstructured":"Dai, W., et al.: Instructblip: towards general-purpose vision-language models with instruction tuning (2023)"},{"key":"23_CR5","unstructured":"ExplosionAI: spacy transformers: industrial-strength natural language processing (NLP) with transformers in Python (2020)"},{"key":"23_CR6","doi-asserted-by":"crossref","unstructured":"Gao, F., Ping, Q., Thattai, G., Reganti, A., Wu, Y.N., Natarajan, P.: A thousand words are worth more than a picture: natural language-centric outside-knowledge visual question answering (2022)","DOI":"10.1109\/CVPR52688.2022.00501"},{"key":"23_CR7","unstructured":"Ilievski, I., Feng, J.: Multimodal learning and reasoning for visual question answering. In: Neural Information Processing Systems (2017)"},{"key":"23_CR8","doi-asserted-by":"crossref","unstructured":"Li, B., Wang, R., Wang, G., Ge, Y., Ge, Y., Shan, Y.: Seed-bench: benchmarking multimodal LLMs with generative comprehension (2023)","DOI":"10.1109\/CVPR52733.2024.01263"},{"key":"23_CR9","unstructured":"Li, J., Li, D., Savarese, S., Hoi, S.: Blip-2: bootstrapping language-image pre-training with frozen image encoders and large language models (2023)"},{"key":"23_CR10","doi-asserted-by":"crossref","unstructured":"Li, X., et al.: Oscar: object-semantics aligned pre-training for vision-language tasks (2020)","DOI":"10.1007\/978-3-030-58577-8_8"},{"key":"23_CR11","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning (2023)"},{"issue":"12","key":"23_CR12","doi-asserted-by":"publisher","first-page":"14439","DOI":"10.1007\/s10462-023-10506-3","volume":"56","author":"R Liu","year":"2023","unstructured":"Liu, R., Mao, R., Luu, A.T., Cambria, E.: A brief survey on recent advances in coreference resolution. Artif. Intell. Rev. 56(12), 14439\u201314481 (2023)","journal-title":"Artif. Intell. Rev."},{"key":"23_CR13","unstructured":"Luan, B., Feng, H., Chen, H., Wang, Y., Zhou, W., Li, H.: Textcot: zoom in for enhanced multimodal text-rich image understanding. arXiv preprint arXiv:2404.09797 (2024)"},{"key":"23_CR14","doi-asserted-by":"crossref","unstructured":"Marino, K., Rastegari, M., Farhadi, A., Mottaghi, R.: OK-VQA: a visual question answering benchmark requiring external knowledge. IEEE (2020)","DOI":"10.1109\/CVPR.2019.00331"},{"key":"23_CR15","doi-asserted-by":"crossref","unstructured":"Mitra, C., Huang, B., Darrell, T., Herzig, R.: Compositional chain-of-thought prompting for large multimodal models (2024)","DOI":"10.1109\/CVPR52733.2024.01367"},{"key":"23_CR16","unstructured":"Ren, S., He, K., Girshick, R., Sun, J.: Faster R-CNN: towards real-time object detection with region proposal networks. In: Advances in Neural Information Processing Systems (NeurIPS), pp. 91\u201399 (2015)"},{"key":"23_CR17","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"146","DOI":"10.1007\/978-3-031-20074-8_9","volume-title":"Computer Vision - ECCV 2022","author":"D Schwenk","year":"2022","unstructured":"Schwenk, D., Khandelwal, A., Clark, C., Marino, K., Mottaghi, R.: A-OKVQA: a benchmark for visual question answering using world knowledge. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13668, pp. 146\u2013162. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-20074-8_9"},{"key":"23_CR18","doi-asserted-by":"crossref","unstructured":"Wang, J., Ju, J., Luan, J., Deng, Z.: Llava-SG: leveraging scene graphs as visual semantic expression in vision-language models (2024)","DOI":"10.1109\/ICASSP49660.2025.10887586"},{"key":"23_CR19","unstructured":"Wang, X., Huang, Z., Fu, J., Torralba, A., Xiao, J.: Vidil: exploring visual dynamics for complex reasoning. arXiv preprint arXiv:2303.01534 (2023)"},{"key":"23_CR20","doi-asserted-by":"crossref","unstructured":"Yang, Z., et al.: An empirical study of GPT-3 for few-shot knowledge-based VQA (2022)","DOI":"10.1609\/aaai.v36i3.20215"},{"key":"23_CR21","unstructured":"Yang, Z., Wang, D., Li, Z., Lin, X., Xie, X.: Multimodal chain-of-thought reasoning in language models. arXiv preprint arXiv:2305.04798 (2023)"}],"container-title":["Lecture Notes in Computer Science","Knowledge Science, Engineering and Management"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-95-3061-8_23","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,28]],"date-time":"2026-01-28T12:14:12Z","timestamp":1769602452000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-95-3061-8_23"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,13]]},"ISBN":["9789819530601","9789819530618"],"references-count":21,"URL":"https:\/\/doi.org\/10.1007\/978-981-95-3061-8_23","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,11,13]]},"assertion":[{"value":"13 November 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"KSEM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Knowledge Science, Engineering and Management","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Macao","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 August 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"7 August 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ksem2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/ksem2025.scimeeting.cn\/en\/web\/index\/27434","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}