{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T17:10:44Z","timestamp":1777655444590,"version":"3.51.4"},"publisher-location":"Cham","reference-count":36,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031533013","type":"print"},{"value":"9783031533020","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-53302-0_3","type":"book-chapter","created":{"date-parts":[[2024,1,28]],"date-time":"2024-01-28T09:02:09Z","timestamp":1706432529000},"page":"32-45","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":29,"title":["Mitigating Fine-Grained Hallucination by\u00a0Fine-Tuning Large Vision-Language Models with\u00a0Caption Rewrites"],"prefix":"10.1007","author":[{"given":"Lei","family":"Wang","sequence":"first","affiliation":[]},{"given":"Jiabang","family":"He","sequence":"additional","affiliation":[]},{"given":"Shenshen","family":"Li","sequence":"additional","affiliation":[]},{"given":"Ning","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Ee-Peng","family":"Lim","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,1,29]]},"reference":[{"key":"3_CR1","doi-asserted-by":"crossref","unstructured":"Agrawal, H., et al.: nocaps: novel object captioning at scale. In: 2019 IEEE\/CVF International Conference on Computer Vision, ICCV, pp. 8947\u20138956. IEEE (2019)","DOI":"10.1109\/ICCV.2019.00904"},{"key":"3_CR2","doi-asserted-by":"publisher","unstructured":"Bang, Y., et al.: A multitask, multilingual, multimodal evaluation of chatgpt on reasoning, hallucination, and interactivity. CoRR abs\/2302.04023 (2023). https:\/\/doi.org\/10.48550\/arXiv.2302.04023. https:\/\/doi.org\/10.48550\/arXiv.2302.04023","DOI":"10.48550\/arXiv.2302.04023"},{"key":"3_CR3","doi-asserted-by":"crossref","unstructured":"Biten, A.F., G\u00f3mez, L., Karatzas, D.: Let there be a clock on the beach: educing object hallucination in image captioning. In: IEEE\/CVF Winter Conference on Applications of Computer Vision, WACV (2022)","DOI":"10.1109\/WACV51458.2022.00253"},{"key":"3_CR4","unstructured":"Brown, T.B., et al.: Language models are few-shot learners. In: NeurIPS (2020)"},{"key":"3_CR5","unstructured":"Chung, H.W., et al.: Scaling instruction-finetuned language models. arXiv preprint arXiv:2210.11416 (2022)"},{"key":"3_CR6","unstructured":"Dai, W., et al.: Instructblip: Towards general-purpose vision-language models with instruction tuning. arXiv preprint arXiv:2305.06500 (2023)"},{"key":"3_CR7","unstructured":"Gong, T., et al.: Multimodal-gpt: A vision and language model for dialogue with humans. arXiv preprint arXiv:2305.04790 (2023)"},{"key":"3_CR8","unstructured":"Huang, Y., Feng, X., Feng, X., Qin, B.: The factual inconsistency problem in abstractive text summarization: a survey. 
arXiv preprint arXiv:2104.14839 (2021)"},{"issue":"12","key":"3_CR9","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3571730","volume":"55","author":"Z Ji","year":"2023","unstructured":"Ji, Z., et al.: Survey of hallucination in natural language generation. ACM Comput. Surv. 55(12), 1\u201338 (2023)","journal-title":"ACM Comput. Surv."},{"key":"3_CR10","doi-asserted-by":"crossref","unstructured":"Lee, S., Park, S.H., Jo, Y., Seo, M.: Volcano: mitigating multimodal hallucination through self-feedback guided revision. arXiv preprint arXiv:2311.07362 (2023)","DOI":"10.18653\/v1\/2024.naacl-long.23"},{"key":"3_CR11","unstructured":"Li, J., Li, D., Xiong, C., Hoi, S.: Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In: International Conference on Machine Learning, pp. 12888\u201312900. PMLR (2022)"},{"key":"3_CR12","doi-asserted-by":"crossref","unstructured":"Li, Y., Du, Y., Zhou, K., Wang, J., Zhao, W.X., Wen, J.R.: Evaluating object hallucination in large vision-language models. arXiv preprint arXiv:2305.10355 (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.20"},{"key":"3_CR13","unstructured":"Liu, F., Lin, K., Li, L., Wang, J., Yacoob, Y., Wang, L.: Aligning large multi-modal model with robust instruction tuning. arXiv preprint arXiv:2306.14565 (2023)"},{"key":"3_CR14","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning (2023)"},{"key":"3_CR15","unstructured":"Lu, P., et al.: Learn to explain: multimodal reasoning via thought chains for science question answering. In: The 36th Conference on Neural Information Processing Systems (NeurIPS) (2022)"},{"key":"3_CR16","doi-asserted-by":"crossref","unstructured":"Maynez, J., Narayan, S., Bohnet, B., McDonald, R.: On faithfulness and factuality in abstractive summarization. In: Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 1906\u20131919 (2020)","DOI":"10.18653\/v1\/2020.acl-main.173"},{"key":"3_CR17","unstructured":"OpenAI: Introducing chatgpt (2022). https:\/\/openai.com\/blog\/chatgpt"},{"key":"3_CR18","unstructured":"OpenAI: GPT-4 technical report. CoRR abs\/2303.08774 (2023)"},{"key":"3_CR19","doi-asserted-by":"crossref","unstructured":"Qin, C., Zhang, A., Zhang, Z., Chen, J., Yasunaga, M., Yang, D.: Is chatgpt a general-purpose natural language processing task solver? arXiv preprint arXiv:2302.06476 (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.85"},{"key":"3_CR20","doi-asserted-by":"crossref","unstructured":"Rohrbach, A., Hendricks, L.A., Burns, K., Darrell, T., Saenko, K.: Object hallucination in image captioning. In: EMNLP (2018)","DOI":"10.18653\/v1\/D18-1437"},{"key":"3_CR21","doi-asserted-by":"crossref","unstructured":"Schwenk, D., Khandelwal, A., Clark, C., Marino, K., Mottaghi, R.: A-OKVQA: a benchmark for visual question answering using world knowledge. In: ECCV 2022, vol. 13668, pp. 146\u2013162. Springer (2022)","DOI":"10.1007\/978-3-031-20074-8_9"},{"key":"3_CR22","unstructured":"Shen, Y., Song, K., Tan, X., Li, D., Lu, W., Zhuang, Y.: Hugginggpt: solving AI tasks with chatgpt and its friends in huggingface. CoRR abs\/2303.17580 (2023)"},{"key":"3_CR23","doi-asserted-by":"crossref","unstructured":"Shuster, K., Poff, S., Chen, M., Kiela, D., Weston, J.: Retrieval augmentation reduces hallucination in conversation. EMNLP (2021)","DOI":"10.18653\/v1\/2021.findings-emnlp.320"},{"key":"3_CR24","unstructured":"Touvron, H., et al.: Llama: open and efficient foundation language models. 
arXiv preprint arXiv:2302.13971 (2023)"},{"key":"3_CR25","unstructured":"Wang, J., et al.: An llm-free multi-dimensional benchmark for mllms hallucination evaluation. arXiv preprint arXiv:2311.07397 (2023)"},{"key":"3_CR26","unstructured":"Wang, J., et al.: Evaluation and analysis of hallucination in large vision-language models. arXiv preprint arXiv:2308.15126 (2023)"},{"key":"3_CR27","doi-asserted-by":"crossref","unstructured":"Wang, Y., et al.: Self-instruct: aligning language model with self generated instructions. arXiv preprint arXiv:2212.10560 (2022)","DOI":"10.18653\/v1\/2023.acl-long.754"},{"key":"3_CR28","unstructured":"Wu, C., Yin, S., Qi, W., Wang, X., Tang, Z., Duan, N.: Visual chatgpt: talking, drawing and editing with visual foundation models. CoRR abs\/2303.04671 (2023)"},{"key":"3_CR29","doi-asserted-by":"crossref","unstructured":"Wu, Z., et al.: A controllable model of grounded response generation. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 35 (2021)","DOI":"10.1609\/aaai.v35i16.17658"},{"key":"3_CR30","doi-asserted-by":"crossref","unstructured":"Xiao, Y., Wang, W.Y.: On hallucination and predictive uncertainty in conditional language generation. In: Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume, pp. 2734\u20132744 (2021)","DOI":"10.18653\/v1\/2021.eacl-main.236"},{"key":"3_CR31","unstructured":"Xu, P., et al.: Lvlm-ehub: a comprehensive evaluation benchmark for large vision-language models (2023)"},{"key":"3_CR32","doi-asserted-by":"crossref","unstructured":"Xu, Z., Shen, Y., Huang, L.: Multiinstruct: improving multi-modal zero-shot learning via instruction tuning. arXiv preprint arXiv:2212.10773 (2022)","DOI":"10.18653\/v1\/2023.acl-long.641"},{"key":"3_CR33","unstructured":"Yang, Z., et al.: MM-REACT: prompting chatgpt for multimodal reasoning and action. CoRR abs\/2303.11381 (2023)"},{"key":"3_CR34","unstructured":"Ye, Q., et al.: mplug-owl: modularization empowers large language models with multimodality (2023)"},{"key":"3_CR35","unstructured":"Yin, S., et al.: Woodpecker: hallucination correction for multimodal large language models. 
arXiv preprint arXiv:2310.16045 (2023)"},{"key":"3_CR36","unstructured":"Zhu, D., Chen, J., Shen, X., Li, X., Elhoseiny, M.: Minigpt-4: enhancing vision-language understanding with advanced large language models (2023)"}],"container-title":["Lecture Notes in Computer Science","MultiMedia Modeling"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-53302-0_3","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,9]],"date-time":"2024-11-09T10:10:55Z","timestamp":1731147055000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-53302-0_3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9783031533013","9783031533020"],"references-count":36,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-53302-0_3","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"29 January 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MMM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Multimedia Modeling","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Amsterdam","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"The Netherlands","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 January 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2 February 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"30","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"mmm2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"ConfTool Pro","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"297","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"112","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the 
conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"38% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.2","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.2","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}