{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,25]],"date-time":"2025-12-25T07:27:42Z","timestamp":1766647662688,"version":"3.30.2"},"reference-count":43,"publisher":"Springer Science and Business Media LLC","issue":"6","license":[{"start":{"date-parts":[[2024,11,25]],"date-time":"2024-11-25T00:00:00Z","timestamp":1732492800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,25]],"date-time":"2024-11-25T00:00:00Z","timestamp":1732492800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2024,12]]},"DOI":"10.1007\/s00530-024-01573-9","type":"journal-article","created":{"date-parts":[[2024,11,25]],"date-time":"2024-11-25T15:05:44Z","timestamp":1732547144000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["A visual question answering model based on image captioning"],"prefix":"10.1007","volume":"30","author":[{"given":"Kun","family":"Zhou","sequence":"first","affiliation":[]},{"given":"Qiongjie","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Dexin","family":"Zhao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,25]]},"reference":[{"key":"1573_CR1","doi-asserted-by":"publisher","DOI":"10.1109\/TMI.2023.3322868","author":"X Huang","year":"2023","unstructured":"Huang, X., Gong, H.: A dual-attention learning network with word and sentence embedding for medical visual question answering. IEEE Trans. Med. Imaging (2023). https:\/\/doi.org\/10.1109\/TMI.2023.3322868","journal-title":"IEEE Trans. Med. Imaging"},{"issue":"3","key":"1573_CR2","doi-asserted-by":"publisher","first-page":"683","DOI":"10.1016\/j.ijtst.2022.06.002","volume":"12","author":"RK Megalingam","year":"2023","unstructured":"Megalingam, R.K., Thanigundala, K., Musani, S.R., Nidamanuru, H., Gadde, L.: Indian traffic sign detection and recognition using deep learning. Int. J Transp. Sci. Technol. 12(3), 683\u2013699 (2023)","journal-title":"Int. J Transp. Sci. Technol."},{"key":"1573_CR3","doi-asserted-by":"crossref","unstructured":"Gupta, M., Asthana, P., Singh, P.: \u201cEduvi: An educational-based visual question answering and image captioning system for enhancing the knowledge of primary level students,\u201d (2023)","DOI":"10.21203\/rs.3.rs-2594097\/v1"},{"key":"1573_CR4","doi-asserted-by":"publisher","first-page":"261","DOI":"10.1016\/j.jmsy.2020.07.011","volume":"58","author":"T Wang","year":"2021","unstructured":"Wang, T., Li, J., Kong, Z., Liu, X., Snoussi, H., Lv, H.: Digital twin improved via visual question answering for vision-language interactive mode in human-machine collaboration. J. Manuf. Syst. 58, 261\u2013269 (2021)","journal-title":"J. Manuf. Syst."},{"issue":"4","key":"1573_CR5","doi-asserted-by":"publisher","first-page":"652","DOI":"10.1109\/TPAMI.2016.2587640","volume":"39","author":"O Vinyals","year":"2016","unstructured":"Vinyals, O., Toshev, A., Bengio, S., Erhan, D.: Show and tell: Lessons learned from the 2015 mscoco image captioning challenge. IEEE Trans. Pattern Anal. Mach. Intell. 39(4), 652\u2013663 (2016)","journal-title":"IEEE Trans. Pattern Anal. 
Mach. Intell."},{"key":"1573_CR6","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1706.03762","author":"A Vaswani","year":"2017","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, \u0141, Polosukhin, I.: Attention is all you need. Adv. Neural Inform. Process. Syst. (2017). https:\/\/doi.org\/10.48550\/arXiv.1706.03762","journal-title":"Adv. Neural Inform. Process. Syst."},{"key":"1573_CR7","unstructured":"Xu, K., Ba, J., Kiros, R., Cho, K., Courville, A., Salakhudinov, R., Zemel, R., Bengio, Y.: \u201cShow, attend and tell: Neural image caption generation with visual attention,\u201d In: International conference on machine learning. PMLR, pp. 2048\u20132057, (2015)"},{"key":"1573_CR8","doi-asserted-by":"crossref","unstructured":"Lu, J., Xiong, C., Parikh, D., Socher, R.: \u201cKnowing when to look: Adaptive attention via a visual sentinel for image captioning,\u201d In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 375\u2013383,(2017)","DOI":"10.1109\/CVPR.2017.345"},{"key":"1573_CR9","doi-asserted-by":"crossref","unstructured":"Anderson, P., He, X., Buehler, C., Teney, D., Johnson, M., Gould, S., Zhang, L.: \u201cBottom-up and top-down attention for image captioning and visual question answering,\u201d in Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 6077\u20136086 (2018)","DOI":"10.1109\/CVPR.2018.00636"},{"key":"1573_CR10","unstructured":"Yang, X., Tang, K., Zhang, H., Cai, J.: \u201cAuto-encoding scene graphs for image captioning,\u201d In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp. 10\u00a0685\u201310\u00a0694 (2019)"},{"key":"1573_CR11","unstructured":"Guo, L., Liu, J., Zhu, X., Yao, P., Lu, S., Lu, H.: \u201cNormalized and geometry-aware self-attention network for image captioning,\u201d in Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp. 10\u00a0327\u201310\u00a0336 (2020)"},{"key":"1573_CR12","doi-asserted-by":"crossref","unstructured":"Lin, T.-Y., Maire, M., Belongie, S., Hays, J., Perona, P., Ramanan, D., Doll\u00e1r, P., Zitnick, C.L., Microsoft coco: Common objects in context,\u201d in Computer Vision-ECCV,: 13th European Conference, Zurich, Switzerland, September 6\u201312, 2014, Proceedings, Part V 13. Springer 2014, 740\u2013755 (2014)","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"1573_CR13","doi-asserted-by":"crossref","unstructured":"Yang, Z., He, X., Gao, J., Deng, L., Smola, A.: \u201cStacked attention networks for image question answering,\u201d In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 21\u201329 (2016)","DOI":"10.1109\/CVPR.2016.10"},{"key":"1573_CR14","unstructured":"Lu, J., Yang, J., Batra, D., Parikh, D.: \u201cHierarchical question-image co-attention for visual question answering,\u201d Adv. Neural Inform. Process. Syst. 29 (2016)"},{"key":"1573_CR15","doi-asserted-by":"crossref","unstructured":"Ben-Younes, H., Cadene, R., Cord, M., Thome, N.: \u201cMutan: Multimodal tucker fusion for visual question answering,\u201d In: Proceedings of the IEEE international conference on computer vision, pp. 2612\u20132620 (2017)","DOI":"10.1109\/ICCV.2017.285"},{"key":"1573_CR16","first-page":"1571","volume":"31","author":"J-H Kim","year":"2018","unstructured":"Kim, J.-H., Jun, J., Zhang, B.-T.: Bilinear attention networks. Adv. Neural Inform. Process. Syst. 31, 1571\u20131581 (2018)","journal-title":"Adv. Neural Inform. 
Process. Syst."},{"issue":"01","key":"1573_CR17","first-page":"8102","volume":"33","author":"H Ben-Younes","year":"2019","unstructured":"Ben-Younes, H., Cadene, R., Thome, N., Cord, M.: Block: Bilinear superdiagonal fusion for visual question answering and visual relationship detection. Proceed. AAAI Conf. Artif. Intell. 33(01), 8102\u20138109 (2019)","journal-title":"Proceed. AAAI Conf. Artif. Intell."},{"key":"1573_CR18","unstructured":"Gao, P., You, H., Zhang, Z., Wang, X., Li, H.: \u201cMulti-modality latent interaction network for visual question answering,\u201d In: Proceedings of the IEEE\/CVF international conference on computer vision, pp. 5825\u20135835 (2019)"},{"key":"1573_CR19","first-page":"2507","volume":"35","author":"P Lu","year":"2022","unstructured":"Lu, P., Mishra, S., Xia, T., Qiu, L., Chang, K.-W., Zhu, S.-C., Tafjord, O., Clark, P., Kalyan, A.: Learn to explain: multimodal reasoning via thought chains for science question answering. Adv. Neural. Inf. Process. Syst. 35, 2507\u20132521 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"1573_CR20","doi-asserted-by":"crossref","unstructured":"Wu, J., Hu, Z., Mooney, R.: \u201cGenerating question relevant captions to aid visual question answering,\u201d in Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics. Florence, Italy: Association for Computational Linguistics, pp. 3585\u20133594 (2019)","DOI":"10.18653\/v1\/P19-1348"},{"key":"1573_CR21","doi-asserted-by":"crossref","unstructured":"Li, P., Yang, Q., Geng, X., Zhou, W., Ding, Z., Nian, Y.: \u201cExploring diverse methods in visual question answering,\u201d arXiv preprint arXiv:2404.13565, (2024)","DOI":"10.1109\/ICECAI62591.2024.10674838"},{"key":"1573_CR22","doi-asserted-by":"crossref","unstructured":"Vosoughi, A., Deng, S., Zhang, S., Tian, Y., Xu, C., Luo, J.: \u201cCross modality bias in visual question answering: A causal view with possible worlds vqa,\u201d IEEE Trans. Multimed. pp. 1\u201316, (2024)","DOI":"10.1109\/TMM.2024.3380259"},{"issue":"3","key":"1573_CR23","first-page":"2286","volume":"35","author":"Y Luo","year":"2021","unstructured":"Luo, Y., Ji, J., Sun, X., Cao, L., Wu, Y., Huang, F., Lin, C.-W., Ji, R.: Dual-level collaborative transformer for image captioning. Proceed. AAAI Conf. Artif. Intell. 35(3), 2286\u20132293 (2021)","journal-title":"Proceed. AAAI Conf. Artif. Intell."},{"key":"1573_CR24","doi-asserted-by":"publisher","first-page":"8828","DOI":"10.1109\/TMM.2023.3242142","volume":"25","author":"S Yue","year":"2023","unstructured":"Yue, S., Tu, Y., Li, L., Yang, Y., Gao, S., Yu, Z.: I3n: Intra-and inter-representation interaction network for change captioning. IEEE Trans. Multim. 25, 8828\u201341 (2023)","journal-title":"IEEE Trans. Multim."},{"key":"1573_CR25","unstructured":"Guo, L., Liu, J., Zhu, X., Yao, P., Lu, S., Lu, H.: \u201cNormalized and geometry-aware self-attention network for image captioning,\u201d In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp. 10\u00a0327\u201310\u00a0336 (2020)"},{"key":"1573_CR26","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: \u201cDeep residual learning for image recognition,\u201d In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 
770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"1573_CR27","doi-asserted-by":"crossref","unstructured":"Antol, S., Agrawal, A., Lu, J., Mitchell, M., Batra, D., Zitnick, C.L., Parikh, D.: \u201cVqa: Visual question answering,\u201d In: Proceedings of the IEEE international conference on computer vision, pp. 2425\u20132433 (2015)","DOI":"10.1109\/ICCV.2015.279"},{"key":"1573_CR28","unstructured":"Zhou, B., Tian, Y., Sukhbaatar, S., Szlam, A., Fergus, R.: \u201cSimple baseline for visual question answering,\u201d arXiv preprint arXiv:1512.02167, (2015)"},{"key":"1573_CR29","doi-asserted-by":"crossref","unstructured":"Yang, Z. He, X., Gao, J., Deng, L., Smola, A.: \u201cStacked attention networks for image question answering,\u201d In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 21\u201329 (2016)","DOI":"10.1109\/CVPR.2016.10"},{"key":"1573_CR30","unstructured":"Noh H., Han, B.: \u201cTraining recurrent answering units with joint loss minimization for vqa,\u201d arXiv preprint arXiv:1606.03647, (2016)"},{"key":"1573_CR31","doi-asserted-by":"crossref","unstructured":"Nam, H., Ha, J.-W., Kim, J.: \u201cDual attention networks for multimodal reasoning and matching,\u201d In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 299\u2013307, (2017)","DOI":"10.1109\/CVPR.2017.232"},{"key":"1573_CR32","unstructured":"Kazemi V., Elqursh, A.: \u201cShow, ask, attend, and answer: A strong baseline for visual question answering,\u201d arXiv preprint arXiv:1704.03162, (2017)"},{"key":"1573_CR33","doi-asserted-by":"crossref","unstructured":"Yu, D., Fu, J., Mei, T., Rui, Y.: \u201cMulti-level attention networks for visual question answering,\u201d In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 4709\u20134717 (2017)","DOI":"10.1109\/CVPR.2017.446"},{"key":"1573_CR34","unstructured":"Kim, J.-H., On, K.-W., Lim, W., Kim, J., Ha, J.-W., Zhang, B.-T.: \u201cHadamard product for low-rank bilinear pooling,\u201d arXiv preprint arXiv:1610.04325, (2016)"},{"key":"1573_CR35","doi-asserted-by":"crossref","unstructured":"Nguyen D.-K., Okatani, T.: \u201cImproved fusion of visual and language representations by dense symmetric co-attention for visual question answering,\u201d in Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 6087\u20136096 (2018)","DOI":"10.1109\/CVPR.2018.00637"},{"key":"1573_CR36","doi-asserted-by":"publisher","first-page":"116","DOI":"10.1016\/j.inffus.2019.08.009","volume":"55","author":"W Zhang","year":"2020","unstructured":"Zhang, W., Yu, J., Hu, H., Hu, H., Qin, Z.: Multimodal feature fusion by relational reasoning and attention for visual question answering. Inform. Fusion 55, 116\u2013126 (2020)","journal-title":"Inform. Fusion"},{"issue":"4","key":"1573_CR37","first-page":"1644","volume":"34","author":"L Peng","year":"2020","unstructured":"Peng, L., Yang, Y., Zhang, X., Ji, Y., Lu, H., Shen, H.T.: Answer again: Improving vqa with cascaded-answering model. IEEE Trans. Knowl. Data Eng. 34(4), 1644\u20131655 (2020)","journal-title":"IEEE Trans. Knowl. Data Eng."},{"key":"1573_CR38","doi-asserted-by":"crossref","unstructured":"Goyal, Y., Khot, T., Summers-Stay, D., Batra, D., Parikh, D.: \u201cMaking the v in vqa matter: Elevating the role of image understanding in visual question answering,\u201d In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 
6904\u20136913 (2017)","DOI":"10.1109\/CVPR.2017.670"},{"key":"1573_CR39","doi-asserted-by":"crossref","unstructured":"Fukui, A., Park, D.H., Yang, D., Rohrbach, A., Darrell, T., Rohrbach, M.: \u201cMultimodal compact bilinear pooling for visual question answering and visual grounding,\u201d arXiv preprint arXiv:1606.01847, (2016)","DOI":"10.18653\/v1\/D16-1044"},{"key":"1573_CR40","first-page":"4223","volume":"2018","author":"D Teney","year":"2017","unstructured":"Teney, D., Anderson, P., He, X., Van Den Hengel, A.: Tips and tricks for visual question answering: Learnings from the,: challenge. Proc. IEEE Conf. Comput. Vis. Pattern Recognit. 2018, 4223\u20134232 (2017)","journal-title":"Proc. IEEE Conf. Comput. Vis. Pattern Recognit."},{"key":"1573_CR41","unstructured":"Li, J., Li, D., Savarese, S., Hoi, S.: \u201cBlip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models,\u201d In: International conference on machine learning. PMLR, pp. 19\u00a0730\u201319\u00a0742 (2023)"},{"key":"1573_CR42","doi-asserted-by":"publisher","first-page":"334","DOI":"10.1016\/j.patrec.2020.02.031","volume":"133","author":"W Li","year":"2020","unstructured":"Li, W., Sun, J., Liu, G., Zhao, L., Fang, X.: Visual question answering with attention transfer and a cross-modal gating mechanism. Pattern Recogn. Lett. 133, 334\u2013340 (2020)","journal-title":"Pattern Recogn. Lett."},{"key":"1573_CR43","doi-asserted-by":"publisher","first-page":"158","DOI":"10.1016\/j.neunet.2021.02.001","volume":"139","author":"J-J Kim","year":"2021","unstructured":"Kim, J.-J., Lee, D.-G., Wu, J., Jung, H.-G., Lee, S.-W.: Visual question answering based on local-scene-aware referring expression generation. Neural Netw. 139, 158\u2013167 (2021)","journal-title":"Neural Netw."}],"container-title":["Multimedia Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01573-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-024-01573-9\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01573-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,16]],"date-time":"2024-12-16T09:18:27Z","timestamp":1734340707000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-024-01573-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,25]]},"references-count":43,"journal-issue":{"issue":"6","published-print":{"date-parts":[[2024,12]]}},"alternative-id":["1573"],"URL":"https:\/\/doi.org\/10.1007\/s00530-024-01573-9","relation":{},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"type":"print","value":"0942-4962"},{"type":"electronic","value":"1432-1882"}],"subject":[],"published":{"date-parts":[[2024,11,25]]},"assertion":[{"value":"16 February 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"7 November 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"25 November 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article 
History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"360"}}