{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,31]],"date-time":"2025-10-31T07:01:35Z","timestamp":1761894095837,"version":"build-2065373602"},"publisher-location":"Singapore","reference-count":26,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819527243","type":"print"},{"value":"9789819527250","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-981-95-2725-0_13","type":"book-chapter","created":{"date-parts":[[2025,10,31]],"date-time":"2025-10-31T05:18:50Z","timestamp":1761887930000},"page":"194-207","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["MQM-MSC: Enhancing Translation Quality Estimation Interpretability with\u00a0Mask-Driven Self-correction in\u00a0Large Language Models"],"prefix":"10.1007","author":[{"given":"Guanghui","family":"Cai","sequence":"first","affiliation":[]},{"given":"Junguo","family":"Zhu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,11,1]]},"reference":[{"key":"13_CR1","unstructured":"Banerjee, S., Lavie, A.: Meteor: an automatic metric for MT evaluation with improved correlation with human judgments. In: Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and\/or Summarization, pp. 
65\u201372 (2005)"},{"key":"13_CR2","doi-asserted-by":"crossref","unstructured":"Deutsch, D., Foster, G., Freitag, M.: Ties matter: meta-evaluating modern metrics with pairwise accuracy and tie calibration. arXiv preprint arXiv:2305.14324 (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.798"},{"key":"13_CR3","doi-asserted-by":"publisher","unstructured":"Freitag, M., Foster, G., Grangier, D., Ratnakar, V., Tan, Q., Macherey, W.: Experts, errors, and context: a large-scale study of human evaluation for machine translation. Trans. Assoc. Comput. Linguist. 9, 1460\u20131474 (2021). https:\/\/doi.org\/10.1162\/tacl_a_00437","DOI":"10.1162\/tacl_a_00437"},{"key":"13_CR4","doi-asserted-by":"crossref","unstructured":"Freitag, M., et al.: Results of WMT23 metrics shared task: metrics might be guilty but references are not innocent. In: Proceedings of the Eighth Conference on Machine Translation, pp. 578\u2013628 (2023)","DOI":"10.18653\/v1\/2023.wmt-1.51"},{"key":"13_CR5","unstructured":"Freitag, M., et al.: Results of WMT22 metrics shared task: stop using BLEU \u2013 neural metrics are better and more robust. In: Koehn, P., et al. (eds.) Proceedings of the Seventh Conference on Machine Translation (WMT), pp. 46\u201368. Association for Computational Linguistics, Abu Dhabi, United Arab Emirates (Hybrid) (2022). https:\/\/aclanthology.org\/2022.wmt-1.2\/"},{"key":"13_CR6","unstructured":"Freitag, M., et al.: Results of WMT22 metrics shared task: Stop using bleu\u2013neural metrics are better and more robust. In: Proceedings of the Seventh Conference on Machine Translation (WMT), pp. 46\u201368 (2022)"},{"key":"13_CR7","unstructured":"Gou, Z., et al.: Critic: large language models can self-correct with tool-interactive critiquing. 
arXiv preprint arXiv:2305.11738 (2023)"},{"key":"13_CR8","doi-asserted-by":"crossref","unstructured":"Guerreiro, N.M., Rei, R., Stigt, D.V., Coheur, L., Colombo, P., Martins, A.F.: xCOMET: transparent machine translation evaluation through fine-grained error detection. Trans. Assoc. Comput. Linguist. 12, 979\u2013995 (2024)","DOI":"10.1162\/tacl_a_00683"},{"key":"13_CR9","unstructured":"Huang, J., et al.: Large language models cannot self-correct reasoning yet. arXiv preprint arXiv:2310.01798 (2023)"},{"key":"13_CR10","doi-asserted-by":"crossref","unstructured":"Kocmi, T., Federmann, C.: GEMBA-MQM: detecting translation quality error spans with GPT-4. arXiv preprint arXiv:2310.13988 (2023)","DOI":"10.18653\/v1\/2023.wmt-1.64"},{"key":"13_CR11","unstructured":"Kocmi, T., Federmann, C.: Large language models are state-of-the-art evaluators of translation quality. arXiv preprint arXiv:2302.14520 (2023)"},{"key":"13_CR12","unstructured":"Kocmi, T., Federmann, C., Grundkiewicz, R., Junczys-Dowmunt, M., Matsushita, H., Menezes, A.: To ship or not to ship: an extensive evaluation of automatic metrics for machine translation. arXiv preprint arXiv:2107.10821 (2021)"},{"key":"13_CR13","doi-asserted-by":"crossref","unstructured":"Leiter, C., Opitz, J., Deutsch, D., Gao, Y., Dror, R., Eger, S.: The Eval4NLP 2023 shared task on prompting large language models as explainable metrics. arXiv preprint arXiv:2310.19792 (2023)","DOI":"10.18653\/v1\/2023.eval4nlp-1.10"},{"key":"13_CR14","unstructured":"Lommel, A.R., Burchardt, A., Uszkoreit, H.: Multidimensional quality metrics: a flexible system for assessing translation quality. In: Proceedings of Translating and the Computer 35. Aslib, London, UK (2013). https:\/\/aclanthology.org\/2013.tc-1.6\/"},{"key":"13_CR15","unstructured":"Lu, Q., Ding, L., Zhang, K., Zhang, J., Tao, D.: MQM-APE: toward high-quality error annotation predictors with automatic post-editing in LLM translation evaluators. 
arXiv preprint arXiv:2409.14335 (2024)"},{"key":"13_CR16","doi-asserted-by":"crossref","unstructured":"Lu, Q., Qiu, B., Ding, L., Zhang, K., Kocmi, T., Tao, D.: Error analysis prompting enables human-like translation evaluation in large language models. arXiv preprint arXiv:2303.13809 (2023)","DOI":"10.20944\/preprints202303.0255.v1"},{"key":"13_CR17","first-page":"46534","volume":"36","author":"A Madaan","year":"2023","unstructured":"Madaan, A., et al.: Self-refine: iterative refinement with self-feedback. Adv. Neural. Inf. Process. Syst. 36, 46534\u201346594 (2023)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"13_CR18","doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.J.: Bleu: a method for automatic evaluation of machine translation. In: Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pp. 311\u2013318 (2002)","DOI":"10.3115\/1073083.1073135"},{"key":"13_CR19","unstructured":"Perrella, S., Proietti, L., Scir\u00e8, A., Campolungo, N., Navigli, R., et\u00a0al.: Matese: machine translation evaluation as a sequence tagging problem. In: Workshop on Statistical Machine Translation, pp. 569\u2013577. Association for Computational Linguistics (2022)"},{"key":"13_CR20","doi-asserted-by":"crossref","unstructured":"Rei, R., Stewart, C., Farinha, A.C., Lavie, A.: Comet: a neural framework for MT evaluation. arXiv preprint arXiv:2009.09025 (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.213"},{"key":"13_CR21","first-page":"8634","volume":"36","author":"N Shinn","year":"2023","unstructured":"Shinn, N., Cassano, F., Gopinath, A., Narasimhan, K., Yao, S.: Reflexion: language agents with verbal reinforcement learning. Adv. Neural. Inf. Process. Syst. 36, 8634\u20138652 (2023)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"13_CR22","unstructured":"Snover, M., Dorr, B., Schwartz, R., Micciulla, L., Makhoul, J.: A study of translation edit rate with targeted human annotation. 
In: Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers, pp. 223\u2013231 (2006)"},{"key":"13_CR23","unstructured":"Wan, Y., et al.: Unite: unified translation evaluation. arXiv preprint arXiv:2204.13346 (2022)"},{"key":"13_CR24","doi-asserted-by":"crossref","unstructured":"Wu, Z., Zeng, Q., Zhang, Z., Tan, Z., Shen, C., Jiang, M.: Large language models can self-correct with minimal effort. In: AI for Math Workshop@ ICML 2024 (2024)","DOI":"10.18653\/v1\/2024.emnlp-main.714"},{"key":"13_CR25","doi-asserted-by":"crossref","unstructured":"Xu, W., et al.: Instructscore: explainable text generation evaluation with finegrained feedback. arXiv preprint arXiv:2305.14282 (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.365"},{"key":"13_CR26","doi-asserted-by":"crossref","unstructured":"Zhao, H., et al.: From handcrafted features to LLMs: a brief survey for machine translation quality estimation. In: 2024 International Joint Conference on Neural Networks (IJCNN), pp. 1\u201310. 
IEEE (2024)","DOI":"10.1109\/IJCNN60899.2024.10650457"}],"container-title":["Lecture Notes in Computer Science","Chinese Computational Linguistics"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-95-2725-0_13","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,31]],"date-time":"2025-10-31T05:18:58Z","timestamp":1761887938000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-95-2725-0_13"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,1]]},"ISBN":["9789819527243","9789819527250"],"references-count":26,"URL":"https:\/\/doi.org\/10.1007\/978-981-95-2725-0_13","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,11,1]]},"assertion":[{"value":"1 November 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"CCL","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China National Conference on Chinese Computational Linguistics","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Jinan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"11 August 
2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"14 August 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"24","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"cncl2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/link.springer.com\/conference\/cncl","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}