{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,12]],"date-time":"2025-11-12T14:26:19Z","timestamp":1762957579005,"version":"3.40.3"},"publisher-location":"Cham","reference-count":64,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031732287"},{"type":"electronic","value":"9783031732294"}],"license":[{"start":{"date-parts":[[2024,10,25]],"date-time":"2024-10-25T00:00:00Z","timestamp":1729814400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,10,25]],"date-time":"2024-10-25T00:00:00Z","timestamp":1729814400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-73229-4_5","type":"book-chapter","created":{"date-parts":[[2024,10,24]],"date-time":"2024-10-24T15:03:09Z","timestamp":1729782189000},"page":"70-87","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["BRIDGE: Bridging Gaps in\u00a0Image Captioning Evaluation with\u00a0Stronger Visual Cues"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1057-3374","authenticated-orcid":false,"given":"Sara","family":"Sarto","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9640-9385","authenticated-orcid":false,"given":"Marcella","family":"Cornia","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5125-4957","authenticated-orcid":false,"given":"Lorenzo","family":"Baraldi","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2239-283X","authenticated-orcid":false,"given":"Rita","family":"Cucchiara","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,10,25]]},"reference":[{"key":"5_CR1","unstructured":"Aditya, S., Yang, Y., Baral, C., Fermuller, C., Aloimonos, Y.: From images to sentences through scene description graphs using commonsense reasoning and knowledge. arXiv preprint arXiv:1511.03292 (2015)"},{"key":"5_CR2","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"382","DOI":"10.1007\/978-3-319-46454-1_24","volume-title":"Computer Vision \u2013 ECCV 2016","author":"P Anderson","year":"2016","unstructured":"Anderson, P., Fernando, B., Johnson, M., Gould, S.: SPICE: semantic propositional image caption evaluation. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9909, pp. 382\u2013398. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46454-1_24"},{"key":"5_CR3","doi-asserted-by":"crossref","unstructured":"Anderson, P., et al.: Bottom-up and top-down attention for image captioning and visual question answering. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00636"},{"key":"5_CR4","unstructured":"Banerjee, S., Lavie, A.: METEOR: an automatic metric for MT evaluation with improved correlation with human judgments. In: ACL Workshops (2005)"},{"key":"5_CR5","doi-asserted-by":"crossref","unstructured":"Barraco, M., Sarto, S., Cornia, M., Baraldi, L., Cucchiara, R.: With a little help from your own past: prototypical memory networks for image captioning. 
In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00282"},{"key":"5_CR6","unstructured":"Bird, S., Klein, E., Loper, E.: Natural Language Processing with Python: Analyzing Text with the Natural Language Toolkit. O\u2019Reilly Media, Inc. (2009)"},{"key":"5_CR7","doi-asserted-by":"crossref","unstructured":"Caffagni, D., et al.: The revolution of multimodal large language models: a survey. In: ACL Findings (2024)","DOI":"10.18653\/v1\/2024.findings-acl.807"},{"key":"5_CR8","doi-asserted-by":"crossref","unstructured":"Chan, D., Petryk, S., Gonzalez, J.E., Darrell, T., Canny, J.: CLAIR: evaluating image captions with large language models. In: EMNLP (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.841"},{"key":"5_CR9","unstructured":"Chen, J., Zhu, D., Shen, X., Li, X., Liu, Z., Zhang, P., Krishnamoorthi, R., Chandra, V., Xiong, Y., Elhoseiny, M.: MiniGPT-v2: large language model as a unified interface for vision-language multi-task learning. arXiv preprint arXiv:2310.09478 (2023)"},{"key":"5_CR10","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"104","DOI":"10.1007\/978-3-030-58577-8_7","volume-title":"Computer Vision \u2013 ECCV 2020","author":"Y-C Chen","year":"2020","unstructured":"Chen, Y.-C., et al.: UNITER: UNiversal Image-TExt representation learning. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12375, pp. 104\u2013120. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58577-8_7"},{"key":"5_CR11","unstructured":"Chiang, W.L., et al.: Vicuna: an open-source chatbot impressing GPT-4 with 90%* ChatGPT quality (2023)"},{"issue":"70","key":"5_CR12","first-page":"1","volume":"25","author":"HW Chung","year":"2024","unstructured":"Chung, H.W., et al.: Scaling instruction-finetuned language models. JMLR 25(70), 1\u201353 (2024)","journal-title":"JMLR"},{"key":"5_CR13","doi-asserted-by":"crossref","unstructured":"Cornia, M., Baraldi, L., Cucchiara, R.: SMArT: training shallow memory-aware transformers for robotic explainability. In: ICRA (2020)","DOI":"10.1109\/ICRA40945.2020.9196653"},{"key":"5_CR14","doi-asserted-by":"crossref","unstructured":"Cornia, M., Stefanini, M., Baraldi, L., Cucchiara, R.: Meshed-memory transformer for image captioning. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01059"},{"key":"5_CR15","unstructured":"Dai, W., et al.: InstructBLIP: towards general-purpose vision-language models with instruction tuning. arXiv preprint arXiv:2305.06500 (2023)"},{"key":"5_CR16","unstructured":"Dong, H., Li, J., Wu, B., Wang, J., Zhang, Y., Guo, H.: Benchmarking and improving detail image caption. arXiv preprint arXiv:2405.19092 (2024)"},{"key":"5_CR17","unstructured":"Herdade, S., Kappeler, A., Boakye, K., Soares, J.: Image captioning: transforming objects into words. In: NeurIPS (2019)"},{"key":"5_CR18","doi-asserted-by":"crossref","unstructured":"Hessel, J., Holtzman, A., Forbes, M., Bras, R.L., Choi, Y.: CLIPScore: a reference-free evaluation metric for image captioning. In: EMNLP (2021)","DOI":"10.18653\/v1\/2021.emnlp-main.595"},{"key":"5_CR19","doi-asserted-by":"publisher","first-page":"853","DOI":"10.1613\/jair.3994","volume":"47","author":"M Hodosh","year":"2013","unstructured":"Hodosh, M., Young, P., Hockenmaier, J.: Framing image description as a ranking task: data, models and evaluation metrics. JAIR 47, 853\u2013899 (2013)","journal-title":"JAIR"},{"key":"5_CR20","doi-asserted-by":"crossref","unstructured":"Huang, L., Wang, W., Chen, J., Wei, X.Y.: Attention on attention for image captioning. 
In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00473"},{"key":"5_CR21","doi-asserted-by":"crossref","unstructured":"Jiang, M., Hu, J., Huang, Q., Zhang, L., Diesner, J., Gao, J.: REO-Relevance, extraness, omission: a fine-grained evaluation for image captioning. In: EMNLP (2019)","DOI":"10.18653\/v1\/D19-1156"},{"key":"5_CR22","doi-asserted-by":"crossref","unstructured":"Jiang, M., et al.: TIGEr: text-to-image grounding for image caption evaluation. In: EMNLP (2019)","DOI":"10.18653\/v1\/D19-1220"},{"key":"5_CR23","doi-asserted-by":"crossref","unstructured":"Karpathy, A., Fei-Fei, L.: Deep visual-semantic alignments for generating image descriptions. In: CVPR (2015)","DOI":"10.1109\/CVPR.2015.7298932"},{"key":"5_CR24","unstructured":"Kim, J.H., Kim, Y., Lee, J., Yoo, K.M., Lee, S.W.: Mutual information divergence: a unified metric for multimodal generative models. In: NeurIPS (2022)"},{"key":"5_CR25","unstructured":"Lauren\u00e7on, H., et al.: OBELICS: an open web-scale filtered dataset of interleaved image-text documents. In: NeurIPS (2023)"},{"key":"5_CR26","doi-asserted-by":"crossref","unstructured":"Lee, H., Yoon, S., Dernoncourt, F., Bui, T., Jung, K.: UMIC: an unreferenced metric for image captioning via contrastive learning. In: ACL (2021)","DOI":"10.18653\/v1\/2021.acl-short.29"},{"key":"5_CR27","doi-asserted-by":"crossref","unstructured":"Lee, H., Yoon, S., Dernoncourt, F., Kim, D.S., Bui, T., Jung, K.: ViLBERTScore: evaluating image caption using vision-and-language BERT. In: EMNLP Workshops (2020)","DOI":"10.18653\/v1\/2020.eval4nlp-1.4"},{"key":"5_CR28","unstructured":"Li, J., Li, D., Savarese, S., Hoi, S.: BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models. In: ICML (2023)"},{"key":"5_CR29","unstructured":"Li, J., Li, D., Xiong, C., Hoi, S.: BLIP: bootstrapping language-image pre-training for unified vision-language understanding and generation. In: ICML (2022)"},{"key":"5_CR30","unstructured":"Li, X., et\u00a0al.: What if we recaption billions of web images with LLaMA-3? arXiv preprint arXiv:2406.08478 (2024)"},{"key":"5_CR31","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"121","DOI":"10.1007\/978-3-030-58577-8_8","volume-title":"Computer Vision \u2013 ECCV 2020","author":"X Li","year":"2020","unstructured":"Li, X., et al.: Oscar: object-semantics aligned pre-training for vision-language tasks. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12375, pp. 121\u2013137. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58577-8_8"},{"key":"5_CR32","doi-asserted-by":"crossref","unstructured":"Li, Y., Pan, Y., Yao, T., Mei, T.: Comprehending and ordering semantics for image captioning. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01746"},{"key":"5_CR33","unstructured":"Lin, C.Y.: ROUGE: a package for automatic evaluation of summaries. In: ACL Workshops (2004)"},{"key":"5_CR34","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"T-Y Lin","year":"2014","unstructured":"Lin, T.-Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). 
https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"5_CR35","doi-asserted-by":"crossref","unstructured":"Liu, H., Li, C., Li, Y., Lee, Y.J.: Improved baselines with visual instruction tuning. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.02484"},{"key":"5_CR36","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning. In: NeurIPS (2023)"},{"key":"5_CR37","unstructured":"Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: ICLR (2019)"},{"key":"5_CR38","unstructured":"Lu, J., Batra, D., Parikh, D., Lee, S.: ViLBERT: pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. In: NeurIPS (2019)"},{"key":"5_CR39","unstructured":"van den Oord, A., Li, Y., Vinyals, O.: Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748 (2018)"},{"key":"5_CR40","doi-asserted-by":"crossref","unstructured":"Pan, Y., Yao, T., Li, Y., Mei, T.: X-Linear attention networks for image captioning. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01098"},{"key":"5_CR41","doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.J.: BLEU: a method for automatic evaluation of machine translation. In: ACL (2002)","DOI":"10.3115\/1073083.1073135"},{"key":"5_CR42","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. In: ICML (2021)"},{"issue":"8","key":"5_CR43","first-page":"9","volume":"1","author":"A Radford","year":"2019","unstructured":"Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., Sutskever, I.: Language models are unsupervised multitask learners. OpenAI Blog 1(8), 9 (2019)","journal-title":"OpenAI Blog"},{"key":"5_CR44","doi-asserted-by":"crossref","unstructured":"Ramos, R., Martins, B., Elliott, D., Kementchedjhieva, Y.: SmallCap: lightweight image captioning prompted with retrieval augmentation. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00278"},{"key":"5_CR45","unstructured":"Rashtchian, C., Young, P., Hodosh, M., Hockenmaier, J.: Collecting image annotations using Amazon\u2019s Mechanical Turk. In: NAACL Workshops (2010)"},{"key":"5_CR46","doi-asserted-by":"crossref","unstructured":"Saito, K., et al.: Pic2Word: mapping pictures to words for zero-shot composed image retrieval. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01850"},{"key":"5_CR47","doi-asserted-by":"crossref","unstructured":"Sarto, S., Barraco, M., Cornia, M., Baraldi, L., Cucchiara, R.: Positive-augmented contrastive learning for image and video captioning evaluation. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00668"},{"key":"5_CR48","doi-asserted-by":"crossref","unstructured":"Sarto, S., Cornia, M., Baraldi, L., Cucchiara, R.: Retrieval-augmented transformer for image captioning. In: CBMI (2022)","DOI":"10.1145\/3549555.3549585"},{"key":"5_CR49","doi-asserted-by":"crossref","unstructured":"Shekhar, R., et al.: FOIL it! find one mismatch between image and language caption. In: ACL (2017)","DOI":"10.18653\/v1\/P17-1024"},{"key":"5_CR50","doi-asserted-by":"crossref","unstructured":"Shi, Y., et al.: EMScore: evaluating video captioning via coarse-grained and fine-grained embedding matching. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01740"},{"key":"5_CR51","doi-asserted-by":"crossref","unstructured":"Tewel, Y., Shalev, Y., Schwartz, I., Wolf, L.: ZeroCap: zero-shot image-to-text generation for visual-semantic arithmetic. 
In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01739"},{"key":"5_CR52","unstructured":"Touvron, H., et al.: LLaMA: open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)"},{"key":"5_CR53","unstructured":"Touvron, H., et\u00a0al.: Llama 2: open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288 (2023)"},{"key":"5_CR54","unstructured":"Vaswani, A., et al.: Attention is all you need. In: NeurIPS (2017)"},{"key":"5_CR55","doi-asserted-by":"crossref","unstructured":"Vedantam, R., Lawrence\u00a0Zitnick, C., Parikh, D.: CIDEr: consensus-based image description evaluation. In: CVPR (2015)","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"5_CR56","doi-asserted-by":"crossref","unstructured":"Vinyals, O., Toshev, A., Bengio, S., Erhan, D.: Show and tell: a neural image caption generator. In: CVPR (2015)","DOI":"10.1109\/CVPR.2015.7298935"},{"key":"5_CR57","doi-asserted-by":"crossref","unstructured":"Wang, S., Yao, Z., Wang, R., Wu, Z., Chen, X.: FAIEr: fidelity and adequacy ensured image caption evaluation. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01383"},{"key":"5_CR58","unstructured":"Xu, K., et al.: Show, attend and tell: neural image caption generation with visual attention. In: ICML (2015)"},{"key":"5_CR59","doi-asserted-by":"crossref","unstructured":"Yang, X., Tang, K., Zhang, H., Cai, J.: Auto-encoding scene graphs for image captioning. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.01094"},{"key":"5_CR60","doi-asserted-by":"crossref","unstructured":"Yi, Y., Deng, H., Hu, J.: Improving image captioning evaluation by considering inter references variance. In: ACL (2020)","DOI":"10.18653\/v1\/2020.acl-main.93"},{"key":"5_CR61","doi-asserted-by":"publisher","first-page":"67","DOI":"10.1162\/tacl_a_00166","volume":"2","author":"P Young","year":"2014","unstructured":"Young, P., Lai, A., Hodosh, M., Hockenmaier, J.: From image descriptions to visual denotations: new similarity metrics for semantic inference over event descriptions. TACL 2, 67\u201378 (2014)","journal-title":"TACL"},{"key":"5_CR62","doi-asserted-by":"crossref","unstructured":"Zhang, P., et al.: VinVL: revisiting visual representations in vision-language models. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00553"},{"key":"5_CR63","unstructured":"Zhang, T., Kishore, V., Wu, F., Weinberger, K.Q., Artzi, Y.: BERTScore: evaluating text generation with BERT. In: ICLR (2020)"},{"key":"5_CR64","doi-asserted-by":"crossref","unstructured":"Zhu, W., Wang, X.E., Yan, A., Eckstein, M., Wang, W.Y.: ImaginE: an imagination-based automatic evaluation metric for natural language generation. 
In: EACL (2023)","DOI":"10.18653\/v1\/2023.findings-eacl.6"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-73229-4_5","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,24]],"date-time":"2024-10-24T15:04:56Z","timestamp":1729782296000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-73229-4_5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,25]]},"ISBN":["9783031732287","9783031732294"],"references-count":64,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-73229-4_5","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,10,25]]},"assertion":[{"value":"25 October 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
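
The object above is the raw response of the public Crossref REST API works endpoint for this chapter's DOI. A minimal sketch of retrieving and unpacking such a record follows; it assumes network access and the third-party requests package, and the field names are taken directly from the record itself.

import requests

# DOI of the chapter described by the record above.
DOI = "10.1007/978-3-031-73229-4_5"

# The Crossref works endpoint returns {"status": ..., "message": {...}},
# where "message" is the metadata object shown above.
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]

print(work["title"][0])  # chapter title ("BRIDGE: ...")
print("; ".join(f'{a["given"]} {a["family"]}' for a in work.get("author", [])))
print(work["DOI"], "| pages", work.get("page"), "| refs", work.get("reference-count"))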