{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,25]],"date-time":"2025-03-25T18:00:10Z","timestamp":1742925610305,"version":"3.40.3"},"publisher-location":"Cham","reference-count":22,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031441974"},{"type":"electronic","value":"9783031441981"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-44198-1_22","type":"book-chapter","created":{"date-parts":[[2023,9,21]],"date-time":"2023-09-21T08:02:34Z","timestamp":1695283354000},"page":"255-267","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Glancing Text and\u00a0Vision Regularized Training to\u00a0Enhance Machine Translation"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-7231-4063","authenticated-orcid":false,"given":"Pei","family":"Cheng","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1539-712X","authenticated-orcid":false,"given":"Xiayang","family":"Shi","sequence":"additional","affiliation":[]},{"given":"Beibei","family":"Liu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6758-4122","authenticated-orcid":false,"given":"Meng","family":"Li","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,9,22]]},"reference":[{"key":"22_CR1","doi-asserted-by":"crossref","unstructured":"Caglayan, O., et al.: Cross-lingual visual pre-training for multimodal machine translation. In: Merlo, P., Tiedemann, J., Tsarfaty, R. (eds.) Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume, EACL 2021, Online, 19\u201323 April 2021, pp. 1317\u20131324. Association for Computational Linguistics (2021). https:\/\/aclanthology.org\/2021.eacl-main.112\/","DOI":"10.18653\/v1\/2021.eacl-main.112"},{"key":"22_CR2","doi-asserted-by":"crossref","unstructured":"Caglayan, O., Madhyastha, P., Specia, L., Barrault, L.: Probing the need for visual context in multimodal machine translation. CoRR abs\/1903.08678 (2019). http:\/\/arxiv.org\/abs\/1903.08678","DOI":"10.18653\/v1\/N19-1422"},{"key":"22_CR3","doi-asserted-by":"crossref","unstructured":"Calixto, I., Liu, Q., Campbell, N.: Incorporating global visual features into attention-based neural machine translation. CoRR abs\/1701.06521 (2017). http:\/\/arxiv.org\/abs\/1701.06521","DOI":"10.18653\/v1\/D17-1105"},{"key":"22_CR4","doi-asserted-by":"publisher","unstructured":"Elliott, D.: Adversarial evaluation of multimodal machine translation. In: Riloff, E., Chiang, D., Hockenmaier, J., Tsujii, J. (eds.) Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, Brussels, Belgium, 31 October\u20134 November 2018, pp. 2974\u20132978. Association for Computational Linguistics (2018). https:\/\/doi.org\/10.18653\/v1\/d18-1329","DOI":"10.18653\/v1\/d18-1329"},{"key":"22_CR5","doi-asserted-by":"publisher","unstructured":"Elliott, D., Frank, S., Sima\u2019an, K., Specia, L.: Multi30K: multilingual English-German image descriptions. In: Proceedings of the 5th Workshop on Vision and Language, pp. 70\u201374. Association for Computational Linguistics, Berlin, Germany, August 2016. https:\/\/doi.org\/10.18653\/v1\/W16-3210. https:\/\/aclanthology.org\/W16-3210","DOI":"10.18653\/v1\/W16-3210"},{"key":"22_CR6","unstructured":"Elliott, D., K\u00e1d\u00e1r, \u00c1.: Imagination improves multimodal translation. In: Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pp. 130\u2013141. Asian Federation of Natural Language Processing, Taipei, Taiwan, November 2017. https:\/\/aclanthology.org\/I17-1014"},{"key":"22_CR7","doi-asserted-by":"publisher","unstructured":"Huang, P., Liu, F., Shiang, S., Oh, J., Dyer, C.: Attention-based multimodal neural machine translation. In: Proceedings of the First Conference on Machine Translation, WMT 2016, Colocated with ACL 2016, 11\u201312 August, Berlin, Germany, pp. 639\u2013645. The Association for Computer Linguistics (2016). https:\/\/doi.org\/10.18653\/v1\/w16-2360","DOI":"10.18653\/v1\/w16-2360"},{"key":"22_CR8","unstructured":"Lee, J., Cho, K., Weston, J., Kiela, D.: Emergent translation in multi-agent communication. CoRR abs\/1710.06922 (2017). http:\/\/arxiv.org\/abs\/1710.06922"},{"key":"22_CR9","doi-asserted-by":"publisher","unstructured":"Lin, H., et al.: Dynamic context-guided capsule network for multimodal machine translation. In: Proceedings of the 28th ACM International Conference on Multimedia, MM 2020, pp. 1320\u20131329. Association for Computing Machinery, New York, NY, USA (2020). https:\/\/doi.org\/10.1145\/3394171.3413715","DOI":"10.1145\/3394171.3413715"},{"key":"22_CR10","doi-asserted-by":"publisher","unstructured":"Liu, J.: Multimodal machine translation. IEEE Access, 1 (2021). https:\/\/doi.org\/10.1109\/ACCESS.2021.3115135","DOI":"10.1109\/ACCESS.2021.3115135"},{"key":"22_CR11","doi-asserted-by":"crossref","unstructured":"Post, M.: A call for clarity in reporting BLEU scores. In: Proceedings of the Third Conference on Machine Translation: Research Papers, pp. 186\u2013191. Association for Computational Linguistics, Belgium, Brussels, October 2018. https:\/\/www.aclweb.org\/anthology\/W18-6319","DOI":"10.18653\/v1\/W18-6319"},{"key":"22_CR12","unstructured":"Sanh, V., Debut, L., Chaumond, J., Wolf, T.: DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter. CoRR abs\/1910.01108 (2019). http:\/\/arxiv.org\/abs\/1910.01108"},{"key":"22_CR13","unstructured":"Satanjeev, B.: METEOR: an automatic metric for MT evaluation with improved correlation with human judgments. In: ACL-2005, pp. 228\u2013231 (2005)"},{"key":"22_CR14","doi-asserted-by":"crossref","unstructured":"Sun, Y., Zhu, S., Yifan, F., Mi, C.: Parallel sentences mining with transfer learning in an unsupervised setting. In: North American Chapter of the Association for Computational Linguistics (2021)","DOI":"10.18653\/v1\/2021.naacl-srw.17"},{"key":"22_CR15","unstructured":"Vaswani, A., et al.: Attention is all you need. arXiv (2017)"},{"key":"22_CR16","unstructured":"Veli\u010dkovi\u0107, P., Cucurull, G., Casanova, A., Romero, A., Li\u00f2, P., Bengio, Y.: Graph attention networks. In: International Conference on Learning Representations (2018). https:\/\/openreview.net\/forum?id=rJXMpikCZ"},{"key":"22_CR17","unstructured":"Wang, D., Xiong, D.: Efficient object-level visual context modeling for multimodal machine translation: masking irrelevant objects helps grounding. CoRR abs\/2101.05208 (2021). https:\/\/arxiv.org\/abs\/2101.05208"},{"key":"22_CR18","doi-asserted-by":"crossref","unstructured":"Yang, P., Chen, B., Zhang, P., Sun, X.: Visual agreement regularized training for multi-modal machine translation. In: The Thirty-Fourth AAAI Conference on Artificial Intelligence, AAAI 2020, The Thirty-Second Innovative Applications of Artificial Intelligence Conference, IAAI 2020, The Tenth AAAI Symposium on Educational Advances in Artificial Intelligence, EAAI 2020, New York, NY, USA, 7\u201312 February 2020, pp. 9418\u20139425. AAAI Press (2020). https:\/\/aaai.org\/ojs\/index.php\/AAAI\/article\/view\/6484","DOI":"10.1609\/aaai.v34i05.6484"},{"key":"22_CR19","doi-asserted-by":"crossref","unstructured":"Yao, S., Wan, X.: Multimodal transformer for multimodal machine translation. In: Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics (2020)","DOI":"10.18653\/v1\/2020.acl-main.400"},{"key":"22_CR20","unstructured":"Zhang, Z., et al.: Neural machine translation with universal visual representation. In: 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, 26\u201330 April 2020. OpenReview.net (2020). https:\/\/openreview.net\/forum?id=Byl8hhNYPS"},{"key":"22_CR21","doi-asserted-by":"crossref","unstructured":"Zhou, M., Cheng, R., Lee, Y.J., Yu, Z.: A visual attention grounding neural model for multimodal machine translation. CoRR abs\/1808.08266 (2018). http:\/\/arxiv.org\/abs\/1808.08266","DOI":"10.18653\/v1\/D18-1400"},{"key":"22_CR22","doi-asserted-by":"publisher","first-page":"503","DOI":"10.1007\/s10590-021-09274-0","volume":"35","author":"S Zhu","year":"2021","unstructured":"Zhu, S., Mi, C., Li, T., Zhang, F., Zhang, Z., Sun, Y.: Improving bilingual word embeddings mapping with monolingual context information. Mach. Transl. 35, 503\u2013518 (2021)","journal-title":"Mach. Transl."}],"container-title":["Lecture Notes in Computer Science","Artificial Neural Networks and Machine Learning \u2013 ICANN 2023"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-44198-1_22","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,11,28]],"date-time":"2023-11-28T22:13:31Z","timestamp":1701209611000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-44198-1_22"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031441974","9783031441981"],"references-count":22,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-44198-1_22","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"22 September 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICANN","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Artificial Neural Networks","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Heraklion","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Greece","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"26 September 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"32","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icann2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/e-nns.org\/icann2023\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"easyacademia.org","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"947","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"426","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"22","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"45% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.4","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"4","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"type of other papers accepted  : 9 Abstract","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}