{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,2]],"date-time":"2025-10-02T06:09:20Z","timestamp":1759385360913,"version":"3.37.3"},"reference-count":41,"publisher":"Springer Science and Business Media LLC","issue":"3","license":[{"start":{"date-parts":[[2023,6,5]],"date-time":"2023-06-05T00:00:00Z","timestamp":1685923200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,6,5]],"date-time":"2023-06-05T00:00:00Z","timestamp":1685923200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U1911401"],"award-info":[{"award-number":["U1911401"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Key Project of Science and Technology Innovation 2030 supported by the Ministry of Science and Technology of China","award":["ZDI135-96"],"award-info":[{"award-number":["ZDI135-96"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimed Tools Appl"],"published-print":{"date-parts":[[2024,1]]},"DOI":"10.1007\/s11042-023-15418-6","type":"journal-article","created":{"date-parts":[[2023,6,5]],"date-time":"2023-06-05T14:03:00Z","timestamp":1685973780000},"page":"7085-7096","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["OECA-Net: A co-attention network for visual question answering based on OCR scene text feature enhancement"],"prefix":"10.1007","volume":"83","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-8615-139X","authenticated-orcid":false,"given":"Feng","family":"Yan","sequence":"first","affiliation":[]},{"given":"Wushouer","family":"Silamu","sequence":"additional","affiliation":[]},{"given":"Yachuang","family":"Chai","sequence":"additional","affiliation":[]},{"given":"Yanbing","family":"Li","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,6,5]]},"reference":[{"key":"15418_CR1","doi-asserted-by":"crossref","unstructured":"Anderson P, He X, Buehler C, Teney D, Johnson M, Gould S, Zhang L (2018) Bottom-up and top-down attention for image captioning and visual question answering. In: Proceedings of the IEEE Conference on computer vision and pattern recognition, pp 6077\u20136086","DOI":"10.1109\/CVPR.2018.00636"},{"key":"15418_CR2","doi-asserted-by":"crossref","unstructured":"Antol S, Agrawal A, Lu J, Mitchell M, Batra D, Zitnick CL, Parikh D (2015) Vqa: Visual question answering. In: Proceedings of the IEEE International conference on computer vision, pp 2425\u20132433","DOI":"10.1109\/ICCV.2015.279"},{"key":"15418_CR3","unstructured":"Ba JL, Kiros JR, Hinton GE (2016) Layer normalization. arXiv:1607.06450"},{"key":"15418_CR4","doi-asserted-by":"crossref","unstructured":"Ben-Younes H, Cadene R, Cord M, Thome N (2017) Mutan: Multimodal tucker fusion for visual question answering. 
In: Proceedings of the IEEE International conference on computer vision, pp 2612\u20132620","DOI":"10.1109\/ICCV.2017.285"},{"key":"15418_CR5","doi-asserted-by":"publisher","first-page":"135","DOI":"10.1162\/tacl_a_00051","volume":"5","author":"P Bojanowski","year":"2017","unstructured":"Bojanowski P, Grave E, Joulin A, Mikolov T (2017) Enriching word vectors with subword information. Trans Assoc Computat Linguistics 5:135\u2013146","journal-title":"Trans Assoc Computat Linguistics"},{"key":"15418_CR6","doi-asserted-by":"crossref","unstructured":"Borisyuk F, Gordo A, Sivakumar V (2018) Rosetta: Large scale system for text detection and recognition in images. In: Proceedings of the 24th ACM SIGKDD International conference on knowledge discovery & data mining, pp 71\u201379","DOI":"10.1145\/3219819.3219861"},{"key":"15418_CR7","doi-asserted-by":"crossref","unstructured":"Cadene R, Ben-Younes H, Cord M, Thome N (2019) Murel: Multimodal relational reasoning for visual question answering. In: Proceedings of the IEEE\/CVF Conference on computer vision and pattern recognition, pp 1989\u20131998","DOI":"10.1109\/CVPR.2019.00209"},{"key":"15418_CR8","doi-asserted-by":"publisher","first-page":"108980","DOI":"10.1016\/j.patcog.2022.108980","volume":"132","author":"C Chen","year":"2022","unstructured":"Chen C, Han D, Chang C-C (2022) Caan:Context-aware attention network for visual question answering. Pattern Recogn 132:108980","journal-title":"Pattern Recogn"},{"key":"15418_CR9","doi-asserted-by":"crossref","unstructured":"Chen Y-C, Li L, Yu L, El Kholy A, Ahmed F, Gan Z, Cheng Y, Liu J (2020) Uniter: Universal image-text representation learning. In: Computer Vision\u2013ECCV 2020: 16th European conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part XXX, pp 104\u2013120. Springer","DOI":"10.1007\/978-3-030-58577-8_7"},{"key":"15418_CR10","unstructured":"Chen K, Wang J, Chen L-C, Gao H, Xu W, Nevatia R (2015) Abc-cnn:, An attention based convolutional neural network for visual question answering. arXiv:1511.05960"},{"key":"15418_CR11","doi-asserted-by":"publisher","unstructured":"Fukui A, Park DH, Yang D, Rohrbach A, Darrell T, Rohrbach M (2016) Multimodal compact bilinear pooling for visual question answering and visual grounding. In: Proceedings of the 2016 Conference on empirical methods in natural language processing. Association for computational linguistics, ???. https:\/\/doi.org\/10.18653\/v1\/d16-1044","DOI":"10.18653\/v1\/d16-1044"},{"key":"15418_CR12","doi-asserted-by":"crossref","unstructured":"Gao P, Jiang Z, You H, Lu P, Hoi SC, Wang X, Li H (2019) Dynamic fusion with intra-and inter-modality attention flow for visual question answering. In: Proceedings of the IEEE\/CVF Conference on computer vision and pattern recognition, pp 6639\u20136648","DOI":"10.1109\/CVPR.2019.00680"},{"key":"15418_CR13","doi-asserted-by":"crossref","unstructured":"Goyal Y, Khot T, Summers-Stay D, Batra D, Parikh D (2017) Making the v in vqa matter: Elevating the role of image understanding in visual question answering. In: Proceedings of the IEEE Conference on computer vision and pattern recognition, pp 6904\u20136913","DOI":"10.1109\/CVPR.2017.670"},{"key":"15418_CR14","unstructured":"Guo M-H, Xu T-X, Liu J-J, Liu Z-N, Jiang P-T, Mu T-J, Zhang S-H, Martin RR, Cheng M-M, Hu S-M (2022) Attention mechanisms in computer vision: a survey. 
Computational Visual Media, pp 1\u201338"},{"issue":"8","key":"15418_CR15","doi-asserted-by":"publisher","first-page":"1735","DOI":"10.1162\/neco.1997.9.8.1735","volume":"9","author":"S Hochreiter","year":"1997","unstructured":"Hochreiter S, Schmidhuber J (1997) Long short-term memory. Neural Comput 9(8):1735\u20131780","journal-title":"Neural Comput"},{"key":"15418_CR16","doi-asserted-by":"crossref","unstructured":"Hu R, Singh A, Darrell T, Rohrbach M (2020) Iterative answer prediction with pointer-augmented multimodal transformers for textvqa. In: Proceedings of the IEEE\/CVF Conference on computer vision and pattern recognition, pp 9992\u201310002","DOI":"10.1109\/CVPR42600.2020.01001"},{"key":"15418_CR17","doi-asserted-by":"publisher","first-page":"14859","DOI":"10.1007\/s11042-017-5070-6","volume":"77","author":"S Jia","year":"2018","unstructured":"Jia S, Zhang Y (2018) Saliency-based deep convolutional neural network for no-reference image quality assessment. Multimed Tools Appl 77:14859\u201314872","journal-title":"Multimed Tools Appl"},{"key":"15418_CR18","doi-asserted-by":"crossref","unstructured":"Jiang X, Yu J, Qin Z, Zhuang Y, Zhang X, Hu Y, Wu Q (2020) Dualvd: an adaptive dual encoding model for deep visual understanding in visual dialogue. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol 34, pp 11125\u201311132","DOI":"10.1609\/aaai.v34i07.6769"},{"key":"15418_CR19","unstructured":"Kim J-H, Jun J, Zhang B-T (2018) Bilinear attention networks. arXiv:1805.07932"},{"key":"15418_CR20","unstructured":"Kim J-H, On K-W, Lim W, Kim J, Ha J-W, Zhang B-T (2016) Hadamard product for low-rank bilinear pooling. arXiv:1610.04325"},{"key":"15418_CR21","unstructured":"Li J, Li D, Xiong C, Hoi S (2022) Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In: International conference on machine learning, pp 12888\u201312900. PMLR"},{"key":"15418_CR22","unstructured":"Lu J, Batra D, Parikh D, Lee S (2019) Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. Advances in neural information processing systems, p 32"},{"issue":"8","key":"15418_CR23","doi-asserted-by":"publisher","first-page":"5705","DOI":"10.1007\/s10462-020-09832-7","volume":"53","author":"S Manmadhan","year":"2020","unstructured":"Manmadhan S, Kovoor BC (2020) Visual question answering: a state-of-the-art review. Artif Intell Rev 53(8):5705\u20135745","journal-title":"Artif Intell Rev"},{"key":"15418_CR24","unstructured":"Peng L, Yang Y, Wang Z, Huang Z, Shen HT (2020) Mra-net: Improving vqa via multi-modal relation attention network. IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"15418_CR25","doi-asserted-by":"crossref","unstructured":"Pennington J, Socher R, Manning CD (2014) Glove: Global vectors for word representation. In: Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp 1532\u20131543","DOI":"10.3115\/v1\/D14-1162"},{"key":"15418_CR26","doi-asserted-by":"crossref","unstructured":"Shahi TB, Sitaula C (2021) Natural language processing for nepali text: a review. Artif Intell Rev, pp 1\u201329","DOI":"10.1007\/s10462-021-10093-1"},{"key":"15418_CR27","doi-asserted-by":"crossref","unstructured":"Singh A, Natarajan V, Shah M, Jiang Y, Chen X, Batra D, Parikh D, Rohrbach M (2019) Towards vqa models that can read. 
In: Proceedings of the IEEE\/CVF Conference on computer vision and pattern recognition, pp 8317\u20138326","DOI":"10.1109\/CVPR.2019.00851"},{"key":"15418_CR28","doi-asserted-by":"crossref","unstructured":"Tan H, Bansal M (2019) Lxmert:, Learning cross-modality encoder representations from transformers. arXiv:1908.07490","DOI":"10.18653\/v1\/D19-1514"},{"key":"15418_CR29","doi-asserted-by":"crossref","unstructured":"Teney D, Anderson P, He X, Van Den Hengel A (2018) Tips and tricks for visual question answering: Learnings from the 2017 challenge. In: Proceedings of the IEEE Conference on computer vision and pattern recognition, pp 4223\u20134232","DOI":"10.1109\/CVPR.2018.00444"},{"key":"15418_CR30","unstructured":"Vaswani A, Shazeer N, Parmar N, Uszkoreit J, Jones L, Gomez AN, Kaiser L, Polosukhin I (2017) Attention is all you need. arXiv:1706.03762"},{"key":"15418_CR31","doi-asserted-by":"crossref","unstructured":"Vinyals O, Toshev A, Bengio S, Erhan D (2015) Show and tell: a neural image caption generator. In: Proceedings of the IEEE Conference on computer vision and pattern recognition, pp 3156\u20133164","DOI":"10.1109\/CVPR.2015.7298935"},{"key":"15418_CR32","unstructured":"Wang P, Yang A, Men R, Lin J, Bai S, Li Z, Ma J, Zhou C, Zhou J, Yang H (2022) Ofa: Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework. In: International conference on machine learning, pp 23318\u201323340. PMLR"},{"issue":"3","key":"15418_CR33","doi-asserted-by":"publisher","first-page":"1045","DOI":"10.3390\/s22031045","volume":"22","author":"F Yan","year":"2022","unstructured":"Yan F, Silamu W, Li Y (2022) Deep modular bilinear attention network for visual question answering. Sensors 22(3):1045","journal-title":"Sensors"},{"issue":"9-10","key":"15418_CR34","doi-asserted-by":"publisher","first-page":"3097","DOI":"10.1007\/s00371-022-02524-z","volume":"38","author":"F Yan","year":"2022","unstructured":"Yan F, Silamu W, Li Y, Chai Y (2022) Spca-net: a based on spatial position relationship co-attention network for visual question answering. The Vis Comput 38(9-10):3097\u20133108","journal-title":"The Vis Comput"},{"key":"15418_CR35","doi-asserted-by":"crossref","unstructured":"Yang Z, He X, Gao J, Deng L, Smola A (2016) Stacked attention networks for image question answering. In: Proceedings of the IEEE Conference on computer vision and pattern recognition, pp 21\u201329","DOI":"10.1109\/CVPR.2016.10"},{"issue":"2s","key":"15418_CR36","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3316767","volume":"15","author":"D Yu","year":"2019","unstructured":"Yu D, Fu J, Tian X, Mei T (2019) Multi-source multi-level attention networks for visual question answering. ACM Transactions on Multimedia Computing, Communications, and Applications (TOMM) 15(2s):1\u201320","journal-title":"ACM Transactions on Multimedia Computing, Communications, and Applications (TOMM)"},{"key":"15418_CR37","doi-asserted-by":"crossref","unstructured":"Yu Z, Yu J, Cui Y, Tao D, Tian Q (2019) Deep modular co-attention networks for visual question answering. 
In: Proceedings of the IEEE\/CVF Conference on computer vision and pattern recognition, pp 6281\u20136290","DOI":"10.1109\/CVPR.2019.00644"},{"issue":"12","key":"15418_CR38","doi-asserted-by":"publisher","first-page":"5947","DOI":"10.1109\/TNNLS.2018.2817340","volume":"29","author":"Z Yu","year":"2018","unstructured":"Yu Z, Yu J, Xiang C, Fan J, Tao D (2018) Beyond bilinear: Generalized multimodal factorized high-order pooling for visual question answering. IEEE Trans Neural Netw Learn Syst 29(12):5947\u20135959. https:\/\/doi.org\/10.1109\/tnnls.2018.2817340","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"15418_CR39","doi-asserted-by":"crossref","unstructured":"Zhang S, Chen M, Chen J, Zou F, Li Y-F, Lu P (2021) Multimodal feature-wise co-attention method for visual question answering, vol 73","DOI":"10.1016\/j.inffus.2021.02.022"},{"key":"15418_CR40","doi-asserted-by":"publisher","first-page":"19033","DOI":"10.1109\/ACCESS.2020.2966827","volume":"8","author":"Y Zhang","year":"2020","unstructured":"Zhang Y, Hutchinson P, Lieven NA, Nunez-Yanez J (2020) Remaining useful life estimation using long short-term memory neural networks and deep fusion. IEEE Access 8:19033\u201319045","journal-title":"IEEE Access"},{"key":"15418_CR41","doi-asserted-by":"publisher","first-page":"106639","DOI":"10.1016\/j.knosys.2020.106639","volume":"212","author":"W Zhang","year":"2021","unstructured":"Zhang W, Yu J, Wang Y, Wang W (2021) Multimodal deep fusion for image question answering. Knowl-Based Syst 212:106639","journal-title":"Knowl-Based Syst"}],"container-title":["Multimedia Tools and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-023-15418-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11042-023-15418-6\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-023-15418-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,8]],"date-time":"2024-01-08T07:06:46Z","timestamp":1704697606000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11042-023-15418-6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,6,5]]},"references-count":41,"journal-issue":{"issue":"3","published-print":{"date-parts":[[2024,1]]}},"alternative-id":["15418"],"URL":"https:\/\/doi.org\/10.1007\/s11042-023-15418-6","relation":{},"ISSN":["1380-7501","1573-7721"],"issn-type":[{"type":"print","value":"1380-7501"},{"type":"electronic","value":"1573-7721"}],"subject":[],"published":{"date-parts":[[2023,6,5]]},"assertion":[{"value":"20 July 2022","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"15 February 2023","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"18 April 2023","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"5 June 2023","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"This article does not contain any studies with 
human participants or animals performed by any of the authors.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics approval"}},{"value":"Not Applicable.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent for Participate"}},{"value":"All authors have read and agreed to the published version of the manuscript.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent for Publication"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":5,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of Interests"}}]}}
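The record above is the JSON work object returned by the Crossref REST API for DOI 10.1007/s11042-023-15418-6. Below is a minimal sketch of how such a record could be retrieved and a few of the fields shown above (title, author list, reference and citation counts) read in Python. It assumes the public Crossref endpoint and the `requests` package; the `mailto` contact address is a placeholder used only for Crossref's "polite pool" convention, and `.get()` defaults are used because field presence can vary between records.

```python
# Minimal sketch: fetch a Crossref work record like the one above and read a
# few of its fields. Assumes the public Crossref REST API and the `requests`
# package; the mailto address is a placeholder for Crossref's polite pool.
import requests

DOI = "10.1007/s11042-023-15418-6"
url = f"https://api.crossref.org/works/{DOI}"

resp = requests.get(url, params={"mailto": "you@example.org"}, timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # the bibliographic record sits under "message"

title = work["title"][0]       # "OECA-Net: A co-attention network ..."
authors = [f'{a.get("given", "")} {a.get("family", "")}'.strip()
           for a in work.get("author", [])]

print(title)
print(", ".join(authors))
print("References deposited:", work.get("reference-count"))
print("Cited by (Crossref):", work.get("is-referenced-by-count"))
```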