{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,25]],"date-time":"2025-09-25T16:01:43Z","timestamp":1758816103038,"version":"3.40.3"},"publisher-location":"Cham","reference-count":35,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030687793"},{"type":"electronic","value":"9783030687809"}],"license":[{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"vor","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021]]},"DOI":"10.1007\/978-3-030-68780-9_32","type":"book-chapter","created":{"date-parts":[[2021,2,24]],"date-time":"2021-02-24T17:04:13Z","timestamp":1614186253000},"page":"381-395","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":14,"title":["Fusion Models for Improved Image Captioning"],"prefix":"10.1007","author":[{"given":"Marimuthu","family":"Kalimuthu","sequence":"first","affiliation":[]},{"given":"Aditya","family":"Mogadala","sequence":"additional","affiliation":[]},{"given":"Marius","family":"Mosbach","sequence":"additional","affiliation":[]},{"given":"Dietrich","family":"Klakow","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,2,25]]},"reference":[{"key":"32_CR1","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"382","DOI":"10.1007\/978-3-319-46454-1_24","volume-title":"Computer Vision \u2013 ECCV 2016","author":"P Anderson","year":"2016","unstructured":"Anderson, P., Fernando, B., Johnson, M., Gould, S.: SPICE: semantic propositional image caption evaluation. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9909, pp. 382\u2013398. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46454-1_24"},{"key":"32_CR2","unstructured":"Banerjee, S., Lavie, A.: METEOR: an automatic metric for MT evaluation with improved correlation with human judgments. In: Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and\/or Summarization, pp. 65\u201372. Association for Computational Linguistics (2005)"},{"key":"32_CR3","first-page":"1137","volume":"3","author":"Y Bengio","year":"2003","unstructured":"Bengio, Y., Ducharme, R., Vincent, P., Janvin, C.: A neural probabilistic language model. J. Mach. Learn. Res. 3, 1137\u20131155 (2003)","journal-title":"J. Mach. Learn. Res."},{"key":"32_CR4","unstructured":"Brown, T.B., et al.: Language models are few-shot learners. CoRR abs\/2005.14165 (2020), https:\/\/arxiv.org\/abs\/2005.14165"},{"key":"32_CR5","unstructured":"Callison-Burch, C., Osborne, M., Koehn, P.: Re-evaluating the role of Bleu in machine translation research. In: 11th Conference EACL, Trento, Italy (2006)"},{"key":"32_CR6","doi-asserted-by":"crossref","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. 
CoRR abs\/2005.12872 (2020)","DOI":"10.1007\/978-3-030-58452-8_13"},{"key":"32_CR7","doi-asserted-by":"crossref","unstructured":"Cho, J., et al.: Language model integration based on memory control for sequence to sequence speech recognition. In: ICASSP, Brighton, United Kingdom, pp. 6191\u20136195 (2019)","DOI":"10.1109\/ICASSP.2019.8683380"},{"key":"32_CR8","doi-asserted-by":"crossref","unstructured":"Cho, K., et al.: Learning phrase representations using RNN encoder-decoder for statistical machine translation. In: Proceedings of EMNLP, pp. 1724\u20131734 (2014)","DOI":"10.3115\/v1\/D14-1179"},{"key":"32_CR9","unstructured":"Dauphin, Y.N., Fan, A., Auli, M., Grangier, D.: Language modeling with gated convolutional networks. In: Proceedings of the 34th ICML, pp. 933\u2013941 (2017)"},{"key":"32_CR10","doi-asserted-by":"publisher","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. In: Proceedings of NAACL 2019, Minneapolis, pp. 4171\u20134186 (2019). https:\/\/doi.org\/10.18653\/v1\/n19-1423","DOI":"10.18653\/v1\/n19-1423"},{"key":"32_CR11","doi-asserted-by":"crossref","unstructured":"Fan, A., Lewis, M., Dauphin, Y.: Hierarchical neural story generation. In: Proceedings of the 56th Annual Meeting of ACL 2018, Melbourne, pp. 889\u2013898 (2018)","DOI":"10.18653\/v1\/P18-1082"},{"key":"32_CR12","unstructured":"G\u00fcl\u00e7ehre, \u00c7., et al.: On using monolingual corpora in neural machine translation. CoRR abs\/1503.03535 (2015), http:\/\/arxiv.org\/abs\/1503.03535"},{"key":"32_CR13","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on CVPR 2016, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"32_CR14","unstructured":"Kalimuthu, M., Nunnari, F., Sonntag, D.: A competitive deep neural network approach for the imageclefmed caption 2020 task. In: Working Notes of CLEF 2020, Thessaloniki. CEUR Workshop Proceedings, vol. 2696. CEUR-WS.org (2020)"},{"key":"32_CR15","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. In: 3rd International Conference on Learning Representations, ICLR, San Diego (2015)"},{"key":"32_CR16","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.E.: Imagenet classification with deep convolutional neural networks. In: NIPS 2012, Lake Tahoe, pp. 1106\u20131114 (2012)"},{"key":"32_CR17","unstructured":"Lin, C.Y.: ROUGE: a package for automatic evaluation of summaries. In: Text Summarization Branches Out, pp. 74\u201381. Assoc. for Comp. Linguistics (2004)"},{"key":"32_CR18","doi-asserted-by":"crossref","unstructured":"Lu, X., Wang, B., Zheng, X., Li, X.: Exploring models and data for remote sensing image caption generation. Trans. Geosci. Remote Sens. 56, 2183\u20132195 (2017)","DOI":"10.1109\/TGRS.2017.2776321"},{"key":"32_CR19","unstructured":"Merity, S., Keskar, N.S., Socher, R.: Regularizing and optimizing LSTM language models. In: Proceedings of ICLR 2018, Vancouver, Conference Track Proceedings (2018)"},{"key":"32_CR20","doi-asserted-by":"crossref","unstructured":"Mikolov, T., Karafi\u00e1t, M., Burget, L., Cernock\u00fd, J., Khudanpur, S.: Recurrent neural network based language model. In: INTERSPEECH, pp. 
1045\u20131048 (2010)","DOI":"10.1109\/ICASSP.2011.5947611"},{"key":"32_CR21","unstructured":"Mogadala, A., Kalimuthu, M., Klakow, D.: Trends in integration of vision and language research: a survey of tasks, datasets, and methods. CoRR abs\/1907.09358 (2019), http:\/\/arxiv.org\/abs\/1907.09358, (Accepted at the Journal of AI Research)"},{"key":"32_CR22","unstructured":"Mosbach, M., Andriushchenko, M., Klakow, D.: On the stability of fine-tuning BERT: misconceptions, explanations, and strong baselines. CoRR abs\/2006.04884 (2020), https:\/\/arxiv.org\/abs\/2006.04884"},{"key":"32_CR23","doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.J.: BLEU: a method for automatic evaluation of machine translation. In: Proceedings of ACL, pp. 311\u2013318 (2002)","DOI":"10.3115\/1073083.1073135"},{"key":"32_CR24","first-page":"8026","volume":"2019","author":"A Paszke","year":"2019","unstructured":"Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., et al.: PyTorch: an imperative style, high-performance deep learning library. NeurIPS 2019, 8026\u20138037 (2019)","journal-title":"NeurIPS"},{"key":"32_CR25","unstructured":"Pelka, O., Friedrich, C.M., Garc\u0131a Seco de Herrera, A., M\u00fcller, H.: Overview of the imageclefmed 2020 concept prediction task: medical image understanding. In: CLEF2020 Working Notes, CEUR Workshop Proceedings, vol. 2696 (2020)"},{"issue":"8","key":"32_CR26","first-page":"9","volume":"1","author":"A Radford","year":"2019","unstructured":"Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., Sutskever, I.: Language models are unsupervised multitask learners. OpenAI blog 1(8), 9 (2019)","journal-title":"OpenAI blog"},{"key":"32_CR27","doi-asserted-by":"crossref","unstructured":"Rohrbach, A., Hendricks, L.A., Burns, K., Darrell, T., Saenko, K.: Object hallucination in image captioning. In: Proceedings of EMNLP Brussels, pp. 4035\u20134045 (2018)","DOI":"10.18653\/v1\/D18-1437"},{"key":"32_CR28","unstructured":"Sammani, F., Elsayed, M.: Look and modify: modification networks for image captioning. In: 30th BMVC 2019, Cardiff, UK, p. 75. BMVA Press (2019)"},{"key":"32_CR29","doi-asserted-by":"crossref","unstructured":"Sammani, F., Melas-Kyriazi, L.: Show, edit and tell: a framework for editing image captions. In: Proceedings of CVPR 2020, Seattle, pp. 4808\u20134816. IEEE (2020)","DOI":"10.1109\/CVPR42600.2020.00486"},{"key":"32_CR30","doi-asserted-by":"crossref","unstructured":"Sriram, A., Jun, H., Satheesh, S., Coates, A.: Cold fusion: training seq2seq models together with language models. In: Proceedings of Interspeech 2018, pp. 387\u2013391 (2018)","DOI":"10.21437\/Interspeech.2018-1392"},{"key":"32_CR31","first-page":"5998","volume":"2017","author":"A Vaswani","year":"2017","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. NIPS 2017, 5998\u20136008 (2017)","journal-title":"NIPS"},{"key":"32_CR32","doi-asserted-by":"crossref","unstructured":"Vedantam, R., Lawrence Zitnick, C., Parikh, D.: CIDEr: consensus-based image description evaluation. In: Proceedings of CVPR 2015, pp. 4566\u20134575. IEEE CS (2015)","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"32_CR33","doi-asserted-by":"crossref","unstructured":"Vinyals, O., Toshev, A., Bengio, S., Erhan, D.: Show and tell: a neural image caption generator. In: CVPR, pp. 3156\u20133164. 
IEEE Computer Society (2015)","DOI":"10.1109\/CVPR.2015.7298935"},{"key":"32_CR34","unstructured":"Wu, Y., et al.: Google\u2019s neural machine translation system: bridging the gap between human and machine translation. CoRR abs\/1609.08144 (2016), http:\/\/arxiv.org\/abs\/1609.08144"},{"key":"32_CR35","unstructured":"Xu, K., et al.: Show, attend and tell: neural image caption generation with visual attention. In: International Conference on Machine Learning, pp. 2048\u20132057 (2015)"}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition. ICPR International Workshops and Challenges"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-68780-9_32","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,2,24]],"date-time":"2021-02-24T17:37:19Z","timestamp":1614188239000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/978-3-030-68780-9_32"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021]]},"ISBN":["9783030687793","9783030687809"],"references-count":35,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-68780-9_32","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2021]]},"assertion":[{"value":"25 February 2021","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICPR","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Pattern Recognition","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2021","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"10 January 2021","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"11 January 2021","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ICPR2020","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/www.icpr2020.it\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
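The record above is a Crossref REST API work response (message-type "work") for the chapter "Fusion Models for Improved Image Captioning" (DOI 10.1007/978-3-030-68780-9_32). As a rough illustration of how such a record can be retrieved and a few of its fields read out, here is a minimal Python sketch using only the standard library; it assumes the public Crossref works endpoint https://api.crossref.org/works/{DOI}, and the variable names and the particular fields selected are illustrative choices, not part of the record itself.

import json
import urllib.request

# DOI of the chapter described by the record above
DOI = "10.1007/978-3-030-68780-9_32"

# The Crossref works endpoint wraps the record in the same envelope seen above:
# {"status": "ok", "message-type": "work", "message": {...}}
url = "https://api.crossref.org/works/" + DOI
with urllib.request.urlopen(url) as resp:
    work = json.load(resp)["message"]

# Pull out a few of the fields present in the record above
title = work["title"][0]                     # "Fusion Models for Improved Image Captioning"
authors = [f'{a.get("given", "")} {a.get("family", "")}'.strip()
           for a in work.get("author", [])]
n_refs = work.get("references-count")        # 35 for this chapter
container = work.get("container-title", [])  # series and volume titles

print(title)
print(", ".join(authors))
print(n_refs, "references;", " / ".join(container))

Crossref asks polite clients to identify themselves (for example via a mailto in the User-Agent header); that detail is omitted here for brevity.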