{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,6]],"date-time":"2026-03-06T18:53:32Z","timestamp":1772823212400,"version":"3.50.1"},"reference-count":35,"publisher":"Springer Science and Business Media LLC","issue":"5","license":[{"start":{"date-parts":[[2024,9,21]],"date-time":"2024-09-21T00:00:00Z","timestamp":1726876800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,9,21]],"date-time":"2024-09-21T00:00:00Z","timestamp":1726876800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"Tianjin Natural Science Foundation under Grant","award":["22JCYBJC00030"],"award-info":[{"award-number":["22JCYBJC00030"]}]},{"name":"the National Natural Science Foundation of China under Grants","award":["62376196"],"award-info":[{"award-number":["62376196"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2024,10]]},"DOI":"10.1007\/s00530-024-01481-y","type":"journal-article","created":{"date-parts":[[2024,9,21]],"date-time":"2024-09-21T19:01:31Z","timestamp":1726945291000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Scene-text aware cross-modal retrieval based on semantic matching (ChinaMM2024)"],"prefix":"10.1007","volume":"30","author":[{"given":"Suyan","family":"Cheng","sequence":"first","affiliation":[]},{"given":"Feifei","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Xi","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Zhuo","family":"Sun","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,9,21]]},"reference":[{"key":"1481_CR1","unstructured":"Faghri, F., Fleet, D.J., Kiros, J.R., Fidler, S.V.: Improving visual-semantic embeddings with hard negatives. pp. 7161\u20137170. arXiv preprint arXiv:1707.05612 (2017)"},{"key":"1481_CR2","doi-asserted-by":"crossref","unstructured":"Lee, K.-H., Chen, X., Hua, G., Hu, H., He, X.: Stacked cross attention for image-text matching. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 201\u2013216 (2018)","DOI":"10.1007\/978-3-030-01225-0_13"},{"key":"1481_CR3","doi-asserted-by":"crossref","unstructured":"Chen, Y., Ma, Z., Zhang, Z., Qi, Z., Yuan, C., Shan, Y., Li, B., Hu, W., Qie, X., Wu, J.: Vilem: visual-language error modeling for image-text retrieval. In: 2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Vancouver, Canada, pp. 11018\u201311027 (2023)","DOI":"10.1109\/CVPR52729.2023.01060"},{"key":"1481_CR4","doi-asserted-by":"crossref","unstructured":"Mafla, A., Dey, S., Biten, A.F., Gomez, L., Karatzas, D.: Multi-modal reasoning graph for scene-text based fine-grained image classification and retrieval. In: 2021 IEEE Winter Conference on Applications of Computer Vision (WACV), Virtual, pp. 4023\u20134033 (2021)","DOI":"10.1109\/WACV48630.2021.00407"},{"key":"1481_CR5","doi-asserted-by":"crossref","unstructured":"Mafla, A., Rezende, R.S., Gomez, L., Larlus, D., Karatzas, D.: Stacmr: scene-text aware cross-modal retrieval. In: 2021 IEEE Winter Conference on Applications of Computer Vision (WACV), Virtual, pp. 2220\u20132230 (2021)","DOI":"10.1109\/WACV48630.2021.00227"},{"key":"1481_CR6","doi-asserted-by":"crossref","unstructured":"Wang, J., Tang, J., Yang, M., Bai, X., Luo, J.: Improving ocr-based image captioning by incorporating geometrical relationship. In: 2021 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Nashville, TN, USA, pp. 1306\u20131315 (2021)","DOI":"10.1109\/CVPR46437.2021.00136"},{"key":"1481_CR7","doi-asserted-by":"crossref","unstructured":"Wang, H., Bai, X., Yang, M., Zhu, S., Wang, J., Liu, W.: Scene text retrieval via joint text detection and similarity learning. In: 2021 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Nashville, TN, USA, pp. 4558\u20134567 (2021)","DOI":"10.1109\/CVPR46437.2021.00453"},{"key":"1481_CR8","doi-asserted-by":"crossref","unstructured":"Gu, J., Cai, J., Joty, S.R., Niu, L., Wang, G.: Look, imagine and match: improving textual-visual cross-modal retrieval with generative models. In: 2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Salt Lake City, UT, USA, pp. 7181\u20137189 (2018)","DOI":"10.1109\/CVPR.2018.00750"},{"key":"1481_CR9","doi-asserted-by":"crossref","unstructured":"Sarafianos, N., Xu, X., Kakadiaris, I.A.: Adversarial representation learning for text-to-image matching. In: 2019 IEEE\/CVF International Conference on Computer Vision (ICCV), Seoul, Korea (South), pp. 5814\u20135824 (2019)","DOI":"10.1109\/ICCV.2019.00591"},{"key":"1481_CR10","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2023.110280","volume":"263","author":"G Zhao","year":"2023","unstructured":"Zhao, G., Zhang, C., Shang, H., Wang, Y., Zhu, L., Qian, X.: Generative label fused network for image-text matching. Knowl. Based Syst. 263, 110280 (2023)","journal-title":"Knowl. Based Syst."},{"key":"1481_CR11","doi-asserted-by":"crossref","unstructured":"Pan, Z., Wu, F., Zhang, B.: Fine-grained image-text matching by cross-modal hard aligning network. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 19275\u201319284 (2023)","DOI":"10.1109\/CVPR52729.2023.01847"},{"issue":"2","key":"1481_CR12","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3383184","volume":"16","author":"Z Zheng","year":"2020","unstructured":"Zheng, Z., Zheng, L., Garrett, M., Yang, Y., Xu, M., Shen, Y.-D.: Dual-path convolutional image-text embeddings with instance loss. ACM Trans. Multimed. Comput. Commun. Appl. (TOMM) 16(2), 1\u201323 (2020)","journal-title":"ACM Trans. Multimed. Comput. Commun. Appl. (TOMM)"},{"key":"1481_CR13","doi-asserted-by":"crossref","unstructured":"Wang, L., Li, Y., Lazebnik, S.: Learning deep structure-preserving image-text embeddings. In: 2016 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Las Vegas, NV, USA, pp. 5005\u20135013 (2016)","DOI":"10.1109\/CVPR.2016.541"},{"key":"1481_CR14","doi-asserted-by":"crossref","unstructured":"Niu, Z., Zhou, M., Wang, L., Gao, X., Hua, G.: Hierarchical multimodal lstm for dense visual-semantic embedding. In: 2017 IEEE International Conference on Computer Vision (ICCV), Venice, Italy, pp. 1881\u20131889 (2017)","DOI":"10.1109\/ICCV.2017.208"},{"key":"1481_CR15","doi-asserted-by":"crossref","unstructured":"Sidorov, O., Hu, R., Rohrbach, M., Singh, A.: Textcaps: a dataset for image captioning with reading comprehension. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part II 16, pp. 742\u2013758. Springer (2020)","DOI":"10.1007\/978-3-030-58536-5_44"},{"key":"1481_CR16","doi-asserted-by":"crossref","unstructured":"Hu, R., Singh, A., Darrell, T., Rohrbach, M.: Iterative answer prediction with pointer-augmented multimodal transformers for textvqa. In: 2020 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Seattle, WA, USA, pp. 9992\u201310002 (2020)","DOI":"10.1109\/CVPR42600.2020.01001"},{"key":"1481_CR17","doi-asserted-by":"crossref","unstructured":"Biten, A.F., Tito, R., Mafla, A., Gomez, L., Rusinol, M., Valveny, E., Jawahar, C., Karatzas, D.: Scene text visual question answering. In: 2019 IEEE\/CVF International Conference on Computer Vision (ICCV), Seoul, Korea (South), pp. 4291\u20134301 (2019)","DOI":"10.1109\/ICCV.2019.00439"},{"key":"1481_CR18","doi-asserted-by":"crossref","unstructured":"Singh, A., Natarajan, V., Shah, M., Jiang, Y., Chen, X., Batra, D., Parikh, D., Rohrbach, M.: Towards vqa models that can read. In: 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Long Beach, CA, USA, pp. 8317\u20138326 (2019)","DOI":"10.1109\/CVPR.2019.00851"},{"key":"1481_CR19","doi-asserted-by":"crossref","unstructured":"Mishra, A., Shekhar, S., Singh, A.K., Chakraborty, A.: Ocr-vqa: visual question answering by reading text in images. In: 2019 International Conference on Document Analysis and Recognition (ICDAR), pp. 947\u2013952. IEEE (2019)","DOI":"10.1109\/ICDAR.2019.00156"},{"key":"1481_CR20","doi-asserted-by":"publisher","DOI":"10.1016\/j.imavis.2023.104751","volume":"136","author":"G Lv","year":"2023","unstructured":"Lv, G., Sun, Y., Nian, F., Zhu, M., Tang, W., Hu, Z.: Come: Clip-ocr and master object for text image captioning. Image Vis. Comput. 136, 104751 (2023)","journal-title":"Image Vis. Comput."},{"key":"1481_CR21","unstructured":"Singh, A., Natarajan, V., Jiang, Y., Chen, X., Shah, M., Rohrbach, M., Batra, D., Parikh, D.: Pythia-a platform for vision & language research. In: SysML Workshop, NeurIPS, vol. 2018 (2018)"},{"key":"1481_CR22","doi-asserted-by":"crossref","unstructured":"Biten, A.F., Tito, R., Mafla, A., Gomez, L., Rusinol, M., Mathew, M., Jawahar, C., Valveny, E., Karatzas, D.: Icdar 2019 competition on scene text visual question answering. In: 2019 International Conference on Document Analysis and Recognition (ICDAR), pp. 1563\u20131570. IEEE (2019)","DOI":"10.1109\/ICDAR.2019.00251"},{"key":"1481_CR23","doi-asserted-by":"crossref","unstructured":"Borisyuk, F., Gordo, A., Sivakumar, V.: Rosetta: large scale system for text detection and recognition in images. In: Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pp. 71\u201379 (2018)","DOI":"10.1145\/3219819.3219861"},{"key":"1481_CR24","doi-asserted-by":"publisher","first-page":"135","DOI":"10.1162\/tacl_a_00051","volume":"5","author":"P Bojanowski","year":"2017","unstructured":"Bojanowski, P., Grave, E., Joulin, A., Mikolov, T.: Enriching word vectors with subword information. Trans. Assoc. Comput. Linguist. 5, 135\u2013146 (2017)","journal-title":"Trans. Assoc. Comput. Linguist."},{"issue":"6","key":"1481_CR25","doi-asserted-by":"publisher","first-page":"1137","DOI":"10.1109\/TPAMI.2016.2577031","volume":"39","author":"S Ren","year":"2016","unstructured":"Ren, S., He, K., Girshick, R., Sun, J.: Faster R-CNN: Towards real-time object detection with region proposal networks. IEEE Trans. Pattern Anal. Mach. Intell. 39(6), 1137\u20131149 (2016)","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"1481_CR26","unstructured":"Devlin, J., Chang, M.-W., Lee, K., Toutanova, K.: Bert: pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)"},{"key":"1481_CR27","doi-asserted-by":"publisher","first-page":"32","DOI":"10.1007\/s11263-016-0981-7","volume":"123","author":"R Krishna","year":"2017","unstructured":"Krishna, R., Zhu, Y., Groth, O., Johnson, J., Hata, K., Kravitz, J., Chen, S., Kalantidis, Y., Li, L.-J., Shamma, D.A., et al.: Visual genome: connecting language and vision using crowdsourced dense image annotations. Int. J. Comput. Vis. 123, 32\u201373 (2017)","journal-title":"Int. J. Comput. Vis."},{"key":"1481_CR28","doi-asserted-by":"crossref","unstructured":"Manning, C.D., Surdeanu, M., Bauer, J., Finkel, J.R., Bethard, S., McClosky, D.: The stanford corenlp natural language processing toolkit. In: Proceedings of 52nd Annual Meeting of the Association for Computational Linguistics: System Demonstrations, pp. 55\u201360 (2014)","DOI":"10.3115\/v1\/P14-5010"},{"key":"1481_CR29","first-page":"5998","volume":"30","author":"A Vaswani","year":"2017","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, \u0141., Polosukhin, I.: Attention is all you need. Adv. Neural Inf. Process. Syst. 30, 5998\u20136008 (2017)","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"1481_CR30","doi-asserted-by":"crossref","unstructured":"Li, K., Zhang, Y., Li, K., Li, Y., Fu, Y.: Visual semantic reasoning for image-text matching. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 4654\u20134662 (2019)","DOI":"10.1109\/ICCV.2019.00475"},{"key":"1481_CR31","doi-asserted-by":"crossref","unstructured":"Diao, H., Zhang, Y., Ma, L., Lu, H.: Similarity reasoning and filtration for image-text matching. arXiv preprint arXiv:2101.01368 (2021)","DOI":"10.1609\/aaai.v35i2.16209"},{"key":"1481_CR32","doi-asserted-by":"crossref","unstructured":"Chen, H., Ding, G., Liu, X., Lin, Z., Liu, J., Han, J.: Imram: iterative matching with recurrent attention memory for cross-modal image-text retrieval. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12655\u201312663 (2020)","DOI":"10.1109\/CVPR42600.2020.01267"},{"key":"1481_CR33","doi-asserted-by":"crossref","unstructured":"Liu, C., Mao, Z., Zhang, T., Xie, H., Wang, B., Zhang, Y.: Graph structured network for image-text matching. In: 2020 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Seattle, WA, USA, pp. 10921\u201310930 (2020)","DOI":"10.1109\/CVPR42600.2020.01093"},{"key":"1481_CR34","doi-asserted-by":"crossref","unstructured":"Fu, Z., Mao, Z., Song, Y., Zhang, Y.: Learning semantic relationship among instances for image-text matching. In: 2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Vancouver, Canada, pp. 15159\u201315168 (2023)","DOI":"10.1109\/CVPR52729.2023.01455"},{"key":"1481_CR35","doi-asserted-by":"crossref","unstructured":"Cheng, M., Sun, Y., Wang, L., Zhu, X., Yao, K., Chen, J., Song, G., Han, J., Liu, J., Ding, E., et al.: Vista: vision and scene text aggregation for cross-modal retrieval. In: 2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), New Orleans, LA, USA, pp. 5184\u20135193 (2022)","DOI":"10.1109\/CVPR52688.2022.00512"}],"container-title":["Multimedia Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01481-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-024-01481-y\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01481-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T18:16:16Z","timestamp":1730139376000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-024-01481-y"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,9,21]]},"references-count":35,"journal-issue":{"issue":"5","published-print":{"date-parts":[[2024,10]]}},"alternative-id":["1481"],"URL":"https:\/\/doi.org\/10.1007\/s00530-024-01481-y","relation":{},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"value":"0942-4962","type":"print"},{"value":"1432-1882","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,9,21]]},"assertion":[{"value":"6 July 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"5 September 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 September 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"284"}}