{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T17:16:56Z","timestamp":1765041416422,"version":"3.40.3"},"publisher-location":"Cham","reference-count":37,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031463105"},{"type":"electronic","value":"9783031463112"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-46311-2_32","type":"book-chapter","created":{"date-parts":[[2023,10,28]],"date-time":"2023-10-28T19:01:50Z","timestamp":1698519710000},"page":"385-395","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["CSDNet: Contrastive Similarity Distillation Network for\u00a0Multi-lingual Image-Text Retrieval"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-1272-8723","authenticated-orcid":false,"given":"Shichen","family":"Lu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4340-4000","authenticated-orcid":false,"given":"Longteng","family":"Guo","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5396-6253","authenticated-orcid":false,"given":"Xingjian","family":"He","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2142-5580","authenticated-orcid":false,"given":"Xinxin","family":"Zhu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0903-9131","authenticated-orcid":false,"given":"Jing","family":"Liu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9180-2935","authenticated-orcid":false,"given":"Si","family":"Liu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,10,29]]},"reference":[{"key":"32_CR1","first-page":"1","volume":"6","author":"Y Bengio","year":"1993","unstructured":"Bengio, Y., LeCun, Y., Henderson, D.: Globally trained handwritten word recognizer using spatial representation, convolutional neural networks, and hidden markov models. Adv. Neural Inf. Process. Syst. 6, 1\u20138 (1993)","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"32_CR2","unstructured":"Chen, X., et al.: Microsoft coco captions: Data collection and evaluation server. arXiv preprint arXiv:1504.00325 (2015)"},{"key":"32_CR3","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"104","DOI":"10.1007\/978-3-030-58577-8_7","volume-title":"Computer Vision \u2013 ECCV 2020","author":"Y-C Chen","year":"2020","unstructured":"Chen, Y.-C., et al.: UNITER: UNiversal image-TExt representation learning. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12375, pp. 104\u2013120. Springer, Cham (2020). 
https:\/\/doi.org\/10.1007\/978-3-030-58577-8_7"},{"key":"32_CR4","doi-asserted-by":"crossref","unstructured":"Chi, Z., et al.: Infoxlm: an information-theoretic framework for cross-lingual language model pre-training. arXiv preprint arXiv:2007.07834 (2020)","DOI":"10.18653\/v1\/2021.naacl-main.280"},{"key":"32_CR5","doi-asserted-by":"crossref","unstructured":"Conneau, A., et al.: Unsupervised cross-lingual representation learning at scale. arXiv preprint arXiv:1911.02116 (2019)","DOI":"10.18653\/v1\/2020.acl-main.747"},{"key":"32_CR6","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: Bert: pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)"},{"key":"32_CR7","first-page":"1","volume":"32","author":"L Dong","year":"2019","unstructured":"Dong, L., et al.: Unified language model pre-training for natural language understanding and generation. Adv. Neural Inf. Process. Syst. 32, 1\u201313 (2019)","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"32_CR8","unstructured":"Faghri, F., Fleet, D.J., Kiros, J.R., Fidler, S.: Vse++: improving visual-semantic embeddings with hard negatives. arXiv preprint arXiv:1707.05612 (2017)"},{"key":"32_CR9","doi-asserted-by":"crossref","unstructured":"Guo, L., Liu, J., Zhu, X., Yao, P., Lu, S., Lu, H.: Normalized and geometry-aware self-attention network for image captioning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10327\u201310336 (2020)","DOI":"10.1109\/CVPR42600.2020.01034"},{"key":"32_CR10","unstructured":"Houlsby, N., et al.: Parameter-efficient transfer learning for nlp. In: International Conference on Machine Learning, pp. 2790\u20132799. PMLR (2019)"},{"key":"32_CR11","doi-asserted-by":"crossref","unstructured":"Huang, H., et al.: Unicoder: a universal language encoder by pre-training with multiple cross-lingual tasks. arXiv preprint arXiv:1909.00964 (2019)","DOI":"10.18653\/v1\/D19-1252"},{"key":"32_CR12","doi-asserted-by":"crossref","unstructured":"Jain, A., et al.: Mural: multimodal, multitask retrieval across languages. arXiv preprint arXiv:2109.05125 (2021)","DOI":"10.18653\/v1\/2021.findings-emnlp.293"},{"key":"32_CR13","unstructured":"Jia, C., et al.: Scaling up visual and vision-language representation learning with noisy text supervision. In: International Conference on Machine Learning, pp. 4904\u20134916. PMLR (2021)"},{"key":"32_CR14","doi-asserted-by":"crossref","unstructured":"Karpathy, A., Fei-Fei, L.: Deep visual-semantic alignments for generating image descriptions. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3128\u20133137 (2015)","DOI":"10.1109\/CVPR.2015.7298932"},{"key":"32_CR15","unstructured":"Lample, G., Conneau, A.: Cross-lingual language model pretraining. arXiv preprint arXiv:1901.07291 (2019)"},{"key":"32_CR16","unstructured":"Li, F., et al.: Vision-language intelligence: tasks, representation learning, and large models. arXiv preprint arXiv:2203.01922 (2022)"},{"issue":"9","key":"32_CR17","doi-asserted-by":"publisher","first-page":"2347","DOI":"10.1109\/TMM.2019.2896494","volume":"21","author":"X Li","year":"2019","unstructured":"Li, X., et al.: COCO-CN for cross-lingual image tagging, captioning, and retrieval. IEEE Trans. Multimedia 21(9), 2347\u20132360 (2019)","journal-title":"IEEE Trans. Multimedia"},{"key":"32_CR18","unstructured":"Liu, W., Chen, S., Guo, L., Zhu, X., Liu, J.: CPTR: full transformer network for image captioning. 
arXiv preprint arXiv:2101.10804 (2021)"},{"key":"32_CR19","unstructured":"Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101 (2017)"},{"key":"32_CR20","doi-asserted-by":"crossref","unstructured":"Lu, H., Fei, N., Huo, Y., Gao, Y., Lu, Z., Wen, J.R.: Cots: collaborative two-stream vision-language pre-training model for cross-modal retrieval. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 15692\u201315701 (2022)","DOI":"10.1109\/CVPR52688.2022.01524"},{"key":"32_CR21","first-page":"1","volume":"32","author":"J Lu","year":"2019","unstructured":"Lu, J., Batra, D., Parikh, D., Lee, S.: Vilbert: pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. Adv. Neural Inf. Process. Syst. 32, 1\u201311 (2019)","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"32_CR22","doi-asserted-by":"crossref","unstructured":"Luo, Z., Xi, Y., Zhang, R., Li, G., Zhao, Z., Ma, J.: Conditioned masked language and image modeling for image-text dense retrieval. In: Findings of the Association for Computational Linguistics: EMNLP 2022, pp. 130\u2013140 (2022)","DOI":"10.18653\/v1\/2022.findings-emnlp.10"},{"key":"32_CR23","doi-asserted-by":"crossref","unstructured":"Ni, M., et al.: M3p: learning universal representations via multitask multilingual multimodal pre-training. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3977\u20133986 (2021)","DOI":"10.1109\/CVPR46437.2021.00397"},{"key":"32_CR24","unstructured":"Oord, A.V.D., Li, Y., Vinyals, O.: Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748 (2018)"},{"key":"32_CR25","doi-asserted-by":"crossref","unstructured":"Pfeiffer, J., Vuli\u0107, I., Gurevych, I., Ruder, S.: Mad-x: an adapter-based framework for multi-task cross-lingual transfer. arXiv preprint arXiv:2005.00052 (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.617"},{"key":"32_CR26","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763. PMLR (2021)"},{"key":"32_CR27","doi-asserted-by":"crossref","unstructured":"Sharma, P., Ding, N., Goodman, S., Soricut, R.: Conceptual captions: a cleaned, hypernymed, image alt-text dataset for automatic image captioning. In: Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics, vol. 1: Long Papers, pp. 2556\u20132565 (2018)","DOI":"10.18653\/v1\/P18-1238"},{"key":"32_CR28","unstructured":"Su, W., et al.: Vl-bert: pre-training of generic visual-linguistic representations. arXiv preprint arXiv:1908.08530 (2019)"},{"key":"32_CR29","doi-asserted-by":"crossref","unstructured":"Sun, S., Chen, Y.C., Li, L., Wang, S., Fang, Y., Liu, J.: Lightningdot: pre-training visual-semantic embeddings for real-time image-text retrieval. In: Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 982\u2013997 (2021)","DOI":"10.18653\/v1\/2021.naacl-main.77"},{"key":"32_CR30","doi-asserted-by":"crossref","unstructured":"Tan, H., Bansal, M.: Lxmert: learning cross-modality encoder representations from transformers. 
arXiv preprint arXiv:1908.07490 (2019)","DOI":"10.18653\/v1\/D19-1514"},{"issue":"2","key":"32_CR31","doi-asserted-by":"publisher","first-page":"394","DOI":"10.1109\/TPAMI.2018.2797921","volume":"41","author":"L Wang","year":"2018","unstructured":"Wang, L., Li, Y., Huang, J., Lazebnik, S.: Learning two-branch neural networks for image-text matching tasks. IEEE Trans. Pattern Anal. Mach. Intell. 41(2), 394\u2013407 (2018)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"32_CR32","doi-asserted-by":"crossref","unstructured":"Xu, C., Zhou, W., Ge, T., Wei, F., Zhou, M.: Bert-of-theseus: compressing bert by progressive module replacing. arXiv preprint arXiv:2002.02925 (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.633"},{"key":"32_CR33","doi-asserted-by":"crossref","unstructured":"Yoshikawa, Y., Shigeto, Y., Takeuchi, A.: Stair captions: constructing a large-scale Japanese image caption dataset. arXiv preprint arXiv:1705.00823 (2017)","DOI":"10.18653\/v1\/P17-2066"},{"key":"32_CR34","doi-asserted-by":"publisher","first-page":"67","DOI":"10.1162\/tacl_a_00166","volume":"2","author":"P Young","year":"2014","unstructured":"Young, P., Lai, A., Hodosh, M., Hockenmaier, J.: From image descriptions to visual denotations: new similarity metrics for semantic inference over event descriptions. Trans. Assoc. Comput. Linguist. 2, 67\u201378 (2014)","journal-title":"Trans. Assoc. Comput. Linguist."},{"key":"32_CR35","doi-asserted-by":"crossref","unstructured":"Zhao, Z., Guo, L., He, X., Shao, S., Yuan, Z., Liu, J.: Mamo: masked multimodal modeling for fine-grained vision-language representation learning. arXiv preprint arXiv:2210.04183 (2022)","DOI":"10.1145\/3539618.3591721"},{"key":"32_CR36","doi-asserted-by":"crossref","unstructured":"Zhou, M., et al.: Uc2: universal cross-lingual cross-modal vision-and-language pre-training. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4155\u20134165 (2021)","DOI":"10.1109\/CVPR46437.2021.00414"},{"key":"32_CR37","unstructured":"Zhou, W., Lee, D.H., Selvam, R.K., Lee, S., Lin, B.Y., Ren, X.: Pre-training text-to-text transformers for concept-centric common sense. 
arXiv preprint arXiv:2011.07956 (2020)"}],"container-title":["Lecture Notes in Computer Science","Image and Graphics"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-46311-2_32","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,10,28]],"date-time":"2023-10-28T19:05:26Z","timestamp":1698519926000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-46311-2_32"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031463105","9783031463112"],"references-count":37,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-46311-2_32","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"29 October 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICIG","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Image and Graphics","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Nanjing","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"22 September 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"24 September 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"12","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icig2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/icig2023.csig.org.cn\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Conference Management Toolkit","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"409","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"166","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the 
conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"41% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}