{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T17:19:35Z","timestamp":1743009575033,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":37,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819783663"},{"type":"electronic","value":"9789819783670"}],"license":[{"start":{"date-parts":[[2024,11,29]],"date-time":"2024-11-29T00:00:00Z","timestamp":1732838400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,29]],"date-time":"2024-11-29T00:00:00Z","timestamp":1732838400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-981-97-8367-0_31","type":"book-chapter","created":{"date-parts":[[2024,11,28]],"date-time":"2024-11-28T11:55:55Z","timestamp":1732794955000},"page":"522-539","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Chinese Grammatical Error Correction via\u00a0Large Language Model Guided Optimization Training"],"prefix":"10.1007","author":[{"given":"Xiao","family":"Liu","sequence":"first","affiliation":[]},{"given":"Ying","family":"Li","sequence":"additional","affiliation":[]},{"given":"Zhengtao","family":"Yu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,29]]},"reference":[{"key":"31_CR1","unstructured":"Asai, A., Wu, Z., Wang, Y., Sil, A., Hajishirzi, H.: Self-rag: Learning to retrieve, generate, and critique through self-reflection. CoRR (2023)"},{"key":"31_CR2","unstructured":"Bai, J., et al.: Qwen technical report. 
CoRR (2023)"},{"key":"31_CR3","unstructured":"Brown, T.B., et al.: Language models are few-shot learners. In: Proceedings of NeurIPS (2020)"},{"key":"31_CR4","doi-asserted-by":"crossref","unstructured":"Bryant, C., Yuan, Z., Qorib, M.R., Cao, H., Ng, H.T., Briscoe, T.: Grammatical error correction: a survey of the state of the art. CoRR (2022)","DOI":"10.1162\/coli_a_00478"},{"key":"31_CR5","doi-asserted-by":"crossref","unstructured":"Choe, Y.J., Ham, J., Park, K., Yoon, Y.: A neural grammatical error correction system built on better pre-training and sequential transfer learning. In: Proceedings of the Fourteenth Workshop on Innovative Use of NLP for Building Educational Applications, pp. 213\u2013227 (2019)","DOI":"10.18653\/v1\/W19-4423"},{"key":"31_CR6","doi-asserted-by":"crossref","unstructured":"Dong, C., et al.: A survey of natural language generation. ACM Comput. Surv. 1\u201338 (2023)","DOI":"10.1145\/3554727"},{"key":"31_CR7","doi-asserted-by":"crossref","unstructured":"Fan, Y., Jiang, F., Li, P., Li, H.: Grammargpt: exploring open-source LLMS for native Chinese grammatical error correction with supervised fine-tuning. In: Proceedings of NLPCC, pp. 69\u201380 (2023)","DOI":"10.1007\/978-3-031-44699-3_7"},{"key":"31_CR8","volume-title":"Is chatgpt a highly fluent grammatical error correction system?","author":"T Fang","year":"2023","unstructured":"Fang, T., et al.: Is chatgpt a highly fluent grammatical error correction system? A comprehensive evaluation, CoRR (2023)"},{"key":"31_CR9","doi-asserted-by":"crossref","unstructured":"Kaneko, M., Mita, M., Kiyono, S., Suzuki, J., Inui, K.: Encoder-decoder models can benefit from pre-trained masked language models in grammatical error correction. In: Proceedings of ACL, pp. 4248\u20134254 (2020)","DOI":"10.18653\/v1\/2020.acl-main.391"},{"key":"31_CR10","unstructured":"Kaneko, M., Okazaki, N.: Controlled generation with prompt insertion for natural language explanations in grammatical error correction. 
CoRR (2023)"},{"key":"31_CR11","doi-asserted-by":"crossref","unstructured":"Kaneko, M., Okazaki, N.: Reducing sequence length by predicting edit spans with large language models. In: Proceedings of EMNLP, pp. 10017\u201310029 (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.619"},{"key":"31_CR12","unstructured":"Kaplan, J., et al.: Scaling laws for neural language models. CoRR (2020)"},{"key":"31_CR13","unstructured":"Koyama, S., Takamura, H., Okazaki, N.: Various errors improve neural grammatical error correction. In: Proceedings of the 35th Pacific Asia Conference on Language, Information and Computation, PACLIC 2021, Shanghai International Studies University, Shanghai, China, 5-7 November 2021, pp. 251\u2013261 (2021)"},{"key":"31_CR14","doi-asserted-by":"crossref","unstructured":"Kwon, W., et al.: Efficient memory management for large language model serving with pagedattention. In: Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles (2023)","DOI":"10.1145\/3600006.3613165"},{"key":"31_CR15","doi-asserted-by":"crossref","unstructured":"Li, J., et al.: Sequence-to-action: grammatical error correction with action guided sequence generation. In: Proceedings of AAAI, pp. 10974\u201310982 (2022)","DOI":"10.1609\/aaai.v36i10.21345"},{"key":"31_CR16","doi-asserted-by":"crossref","unstructured":"Li, Y., et al.: TemplateGEC: improving grammatical error correction with detection template. In: Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (vol. 1: Long Papers), pp. 6878\u20136892 (2023)","DOI":"10.18653\/v1\/2023.acl-long.380"},{"key":"31_CR17","unstructured":"Li, Y., et al.: On the (in)effectiveness of large language models for Chinese text correction. CoRR (2023)"},{"key":"31_CR18","doi-asserted-by":"crossref","unstructured":"Li, Y., et al.: Towards real-world writing assistance: a Chinese character checking benchmark with faked and misspelled characters. 
CoRR (2023)","DOI":"10.18653\/v1\/2024.acl-long.469"},{"key":"31_CR19","doi-asserted-by":"crossref","unstructured":"Omelianchuk, K., Atrasevych, V., Chernodub, A.N., Skurzhanskyi, O.: Gector - grammatical error correction: tag, not rewrite. In: Proceedings of the Fifteenth Workshop on Innovative Use of NLP for Building Educational Applications, BEA@ACL 2020, Online, July 10, 2020, pp. 163\u2013170 (2020)","DOI":"10.18653\/v1\/2020.bea-1.16"},{"key":"31_CR20","unstructured":"Qu, F., Wu, Y.: Evaluating the capability of large-scale language models on Chinese grammatical error correction task. CoRR (2023)"},{"key":"31_CR21","unstructured":"Shao, Y., et al.: CPT: A pre-trained unbalanced transformer for both Chinese language understanding and generation. CoRR (2021)"},{"key":"31_CR22","doi-asserted-by":"crossref","unstructured":"Song, Y., Krishna, K., Bhatt, R., Gimpel, K., Iyyer, M.: Gee! grammar error explanation with large language models. CoRR (2023)","DOI":"10.18653\/v1\/2024.findings-naacl.49"},{"key":"31_CR23","unstructured":"Stahlberg, F., Kumar, S.: Synthetic data generation for grammatical error correction with tagged corruption models. In: Proceedings of EACL, pp. 37\u201347 (2021)"},{"key":"31_CR24","unstructured":"Wang, W., et al.: Structbert: incorporating language structures into pre-training for deep language understanding. In: Proceedings of ICLR (2020)"},{"key":"31_CR25","unstructured":"Wang, X., et\u00a0al.: Instructuie: Multi-task instruction tuning for unified information extraction. arXiv preprint arXiv:2304.08085 (2023)"},{"key":"31_CR26","doi-asserted-by":"crossref","unstructured":"Wang, Y., Kordi, Y., Mishra, S., Liu, A., Smith, N.A., Khashabi, D., Hajishirzi, H.: Self-instruct: aligning language models with self-generated instructions. In: Proceedings of ACL, pp. 
13484\u201313508 (2023)","DOI":"10.18653\/v1\/2023.acl-long.754"},{"key":"31_CR27","doi-asserted-by":"crossref","unstructured":"Wang, Y., Wang, Y., Dang, K., Liu, J., Liu, Z.: A comprehensive survey of grammatical error correction. ACM Trans. Intell. Syst. Technol. 1\u201351 (2021)","DOI":"10.1145\/3474840"},{"key":"31_CR28","volume-title":"Chatgpt or grammarly?","author":"H Wu","year":"2023","unstructured":"Wu, H., Wang, W., Wan, Y., Jiao, W., Lyu, M.R.: Chatgpt or grammarly? Evaluating chatgpt on grammatical error correction benchmark, CoRR (2023)"},{"key":"31_CR29","doi-asserted-by":"crossref","unstructured":"Ye, J., Li, Y., Li, Y., Zheng, H.: Mixedit: revisiting data augmentation and beyond for grammatical error correction. In: Proceedings of EMNLP Findings, pp. 10161\u201310175 (2023)","DOI":"10.18653\/v1\/2023.findings-emnlp.681"},{"key":"31_CR30","unstructured":"Zhang, B.: Features and functions of the HSK dynamic composition corpus. Int. Chinese Language Edu. 71\u201379 (2009)"},{"key":"31_CR31","unstructured":"Zhang, Y., Cui, L., Cai, D., Huang, X., Fang, T., Bi, W.: Multi-task instruction tuning of llama for specific scenarios: a preliminary study on writing assistance. CoRR (2023)"},{"key":"31_CR32","doi-asserted-by":"crossref","unstructured":"Zhang, Y., et al.: MuCGEC: a multi-reference multi-source evaluation dataset for Chinese grammatical error correction. In: Proceedings of NAACL, pp. 3118\u20133130 (2022)","DOI":"10.18653\/v1\/2022.naacl-main.227"},{"key":"31_CR33","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Zhang, B., Li, Z., Bao, Z., Li, C., Zhang, M.: Syngec: syntax-enhanced grammatical error correction with a tailored gec-oriented parser. In: Proceedings of EMNLP, pp. 2518\u20132531 (2022)","DOI":"10.18653\/v1\/2022.emnlp-main.162"},{"key":"31_CR34","unstructured":"Zhao, W.X., et al.: A survey of large language models. 
CoRR (2023)"},{"key":"31_CR35","series-title":"Lecture Notes in Computer Science (Lecture Notes in Artificial Intelligence)","doi-asserted-by":"publisher","first-page":"439","DOI":"10.1007\/978-3-319-99501-4_41","volume-title":"Natural Language Processing and Chinese Computing","author":"Y Zhao","year":"2018","unstructured":"Zhao, Y., Jiang, N., Sun, W., Wan, X.: Overview of the NLPCC 2018 shared task: grammatical error correction. In: Zhang, M., Ng, V., Zhao, D., Li, S., Zan, H. (eds.) NLPCC 2018. LNCS (LNAI), vol. 11109, pp. 439\u2013445. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-319-99501-4_41"},{"key":"31_CR36","unstructured":"Zhou, C., et al.: LIMA: less is more for alignment. In: Proceedings of NeurIPS (2023)"},{"key":"31_CR37","doi-asserted-by":"crossref","unstructured":"Zhou, H., et al.: Improving seq2seq grammatical error correction via decoding interventions. In: Proceedings of EMNLP Findings, pp. 7393\u20137405 (2023)","DOI":"10.18653\/v1\/2023.findings-emnlp.495"}],"container-title":["Lecture Notes in Computer Science","Chinese Computational Linguistics"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-97-8367-0_31","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,28]],"date-time":"2024-11-28T12:09:50Z","timestamp":1732795790000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-97-8367-0_31"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,29]]},"ISBN":["9789819783663","9789819783670"],"references-count":37,"URL":"https:\/\/doi.org\/10.1007\/978-981-97-8367-0_31","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,11,29]]},"assertion":[{"value":"29 November 
2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"CCL","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China National Conference on Chinese Computational Linguistics","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Taiyuan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"25 July 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28 July 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"cncl2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/cips-cl.org\/static\/CCL2024\/en\/index.html","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}