{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,19]],"date-time":"2026-02-19T11:30:00Z","timestamp":1771500600353,"version":"3.50.1"},"publisher-location":"Singapore","reference-count":28,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819794393","type":"print"},{"value":"9789819794409","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-981-97-9440-9_8","type":"book-chapter","created":{"date-parts":[[2024,10,31]],"date-time":"2024-10-31T17:06:20Z","timestamp":1730394380000},"page":"94-106","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["High-Quality Distractors Generation for\u00a0Human Exam Based on\u00a0Reinforcement Learning from\u00a0Preference Feedback"],"prefix":"10.1007","author":[{"given":"Ruofan","family":"Wang","sequence":"first","affiliation":[]},{"given":"Yuru","family":"Jiang","sequence":"additional","affiliation":[]},{"given":"Yuyang","family":"Tao","sequence":"additional","affiliation":[]},{"given":"Mengyuan","family":"Li","sequence":"additional","affiliation":[]},{"given":"Xia","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Shili","family":"Ge","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,1]]},"reference":[{"key":"8_CR1","doi-asserted-by":"crossref","unstructured":"Maurya, K.K., Desarkar, M.S.: Learning to distract: a hierarchical multi-decoder network for automated generation of long distractors for multiple-choice questions for reading comprehension. In: Proceedings of the 29th ACM International Conference on Information & Knowledge Management, pp. 1115\u20131124 (2020)","DOI":"10.1145\/3340531.3411997"},{"key":"8_CR2","doi-asserted-by":"publisher","first-page":"280","DOI":"10.1109\/TASLP.2021.3138706","volume":"30","author":"J Xie","year":"2021","unstructured":"Xie, J., Peng, N., Cai, Y., Wang, T., Huang, Q.: Diverse distractor generation for constructing high-quality multiple choice questions. IEEE\/ACM Trans. Audio Speech Lang. Process. 30, 280\u2013291 (2021)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"key":"8_CR3","doi-asserted-by":"crossref","unstructured":"Gao, Y., Bing, L., Li, P., King, I., Lyu, M.R.: Generating distractors for reading comprehension questions from real examinations. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a033, pp. 6423\u20136430 (2019)","DOI":"10.1609\/aaai.v33i01.33016423"},{"key":"8_CR4","doi-asserted-by":"crossref","unstructured":"Zhou, X., Luo, S., Wu, Y.: Co-attention hierarchical network: generating coherent long distractors for reading comprehension. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a034, pp. 
9725\u20139732 (2020)","DOI":"10.1609\/aaai.v34i05.6522"},{"key":"8_CR5","doi-asserted-by":"crossref","unstructured":"Qiu, Z., Wu, X., Fan, W.: Automatic distractor generation for multiple choice questions in standard tests. arXiv preprint arXiv:2011.13100 (2020)","DOI":"10.18653\/v1\/2020.coling-main.189"},{"key":"8_CR6","doi-asserted-by":"crossref","unstructured":"Shuai, P., Wei, Z., Liu, S., Xu, X., Li, L.: Topic enhanced multi-head co-attention: Generating distractors for reading comprehension. In: 2021 International Joint Conference on Neural Networks (IJCNN), pp.\u00a01\u20138. IEEE (2021)","DOI":"10.1109\/IJCNN52387.2021.9533341"},{"issue":"7","key":"8_CR7","doi-asserted-by":"publisher","first-page":"8275","DOI":"10.1007\/s10489-022-03894-6","volume":"53","author":"P Shuai","year":"2023","unstructured":"Shuai, P., Li, L., Liu, S., Shen, J.: Qdg: a unified model for automatic question-distractor pairs generation. Appl. Intell. 53(7), 8275\u20138285 (2023)","journal-title":"Appl. Intell."},{"issue":"8","key":"8_CR8","doi-asserted-by":"publisher","first-page":"1735","DOI":"10.1162\/neco.1997.9.8.1735","volume":"9","author":"S Hochreiter","year":"1997","unstructured":"Hochreiter, S., Schmidhuber, J.: Long short-term memory. Neural Comput. 9(8), 1735\u20131780 (1997)","journal-title":"Neural Comput."},{"key":"8_CR9","doi-asserted-by":"crossref","unstructured":"Cho, K., Van\u00a0Merri\u00ebnboer, B., Gulcehre, C., Bahdanau, D., Bougares, F., Schwenk, H., Bengio, Y.: Learning phrase representations using rnn encoder-decoder for statistical machine translation. arXiv preprint arXiv:1406.1078 (2014)","DOI":"10.3115\/v1\/D14-1179"},{"key":"8_CR10","unstructured":"Vaswani, A., et al.: Attention is all you need. Adv. Neural Inform. Process. Syst. 30 (2017)"},{"issue":"140","key":"8_CR11","first-page":"1","volume":"21","author":"C Raffel","year":"2020","unstructured":"Raffel, C., et al.: Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res. 21(140), 1\u201367 (2020)","journal-title":"J. Mach. Learn. Res."},{"key":"8_CR12","doi-asserted-by":"crossref","unstructured":"Lewis, M., et al.: Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. arXiv preprint arXiv:1910.13461 (2019)","DOI":"10.18653\/v1\/2020.acl-main.703"},{"key":"8_CR13","unstructured":"Schulman, J., Zoph, B., Kim, C., et\u00a0al.: Introducing chatgpt. https:\/\/openai.com\/blog\/chatgpt"},{"key":"8_CR14","unstructured":"Touvron, H., et\u00a0al.: Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)"},{"key":"8_CR15","unstructured":"Touvron, H., et\u00a0al.: Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288 (2023)"},{"key":"8_CR16","doi-asserted-by":"crossref","unstructured":"Du, Z., Qian, Y., Liu, X., Ding, M., Qiu, J., Yang, Z., Tang, J.: Glm: General language model pretraining with autoregressive blank infilling. arXiv preprint arXiv:2103.10360 (2021)","DOI":"10.18653\/v1\/2022.acl-long.26"},{"key":"8_CR17","first-page":"24824","volume":"35","author":"J Wei","year":"2022","unstructured":"Wei, J., et al.: Chain-of-thought prompting elicits reasoning in large language models. Adv. Neural. Inf. Process. Syst. 35, 24824\u201324837 (2022)","journal-title":"Adv. Neural. Inf. Process. 
Syst."},{"key":"8_CR18","first-page":"27730","volume":"35","author":"L Ouyang","year":"2022","unstructured":"Ouyang, L., et al.: Training language models to follow instructions with human feedback. Adv. Neural. Inf. Process. Syst. 35, 27730\u201327744 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"8_CR19","unstructured":"Achiam, J., et\u00a0al.: Gpt-4 technical report. arXiv preprint arXiv:2303.08774 (2023)"},{"key":"8_CR20","unstructured":"Schulman, J., Wolski, F., Dhariwal, P., Radford, A., Klimov, O.: Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347 (2017)"},{"key":"8_CR21","doi-asserted-by":"crossref","unstructured":"Jia, X., Zhou, W., Sun, X., Wu, Y.: Eqg-race: Examination-type question generation. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a035, pp. 13143\u201313151 (2021)","DOI":"10.1609\/aaai.v35i14.17553"},{"key":"8_CR22","doi-asserted-by":"crossref","unstructured":"Lai, G., Xie, Q., Liu, H., Yang, Y., Hovy, E.: Race: Large-scale reading comprehension dataset from examinations. arXiv preprint arXiv:1704.04683 (2017)","DOI":"10.18653\/v1\/D17-1082"},{"key":"8_CR23","doi-asserted-by":"crossref","unstructured":"Li, J., Galley, M., Brockett, C., Gao, J., Dolan, B.: A diversity-promoting objective function for neural conversation models. arXiv preprint arXiv:1510.03055 (2015)","DOI":"10.18653\/v1\/N16-1014"},{"key":"8_CR24","unstructured":"Zhang, S., et\u00a0al.: Opt: Open pre-trained transformer language models. arXiv preprint arXiv:2205.01068 (2022)"},{"key":"8_CR25","unstructured":"Hu, E.J., et al.: Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685 (2021)"},{"key":"8_CR26","doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.J.: Bleu: a method for automatic evaluation of machine translation. In: Proceedings of the 40th annual meeting of the Association for Computational Linguistics, pp. 311\u2013318 (2002)","DOI":"10.3115\/1073083.1073135"},{"key":"8_CR27","unstructured":"Rouge, L.C.: A package for automatic evaluation of summaries. In: Proceedings of Workshop on Text Summarization of ACL, Spain, vol.\u00a05 (2004)"},{"key":"8_CR28","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: Bert: Pre-training of deep bidirectional transformers for language understanding. 
arXiv preprint arXiv:1810.04805 (2018)"}],"container-title":["Lecture Notes in Computer Science","Natural Language Processing and Chinese Computing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-97-9440-9_8","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,31]],"date-time":"2024-10-31T17:10:36Z","timestamp":1730394636000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-97-9440-9_8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,1]]},"ISBN":["9789819794393","9789819794409"],"references-count":28,"URL":"https:\/\/doi.org\/10.1007\/978-981-97-9440-9_8","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,1]]},"assertion":[{"value":"1 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"NLPCC","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"CCF International Conference on Natural Language Processing and Chinese Computing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Hangzhou","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2 November 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 November 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"13","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"nlpcc2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/tcci.ccf.org.cn\/conference\/2024\/index.php","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}