{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T20:55:43Z","timestamp":1743022543226,"version":"3.40.3"},"publisher-location":"Cham","reference-count":52,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031306747"},{"type":"electronic","value":"9783031306754"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-30675-4_48","type":"book-chapter","created":{"date-parts":[[2023,4,14]],"date-time":"2023-04-14T10:02:24Z","timestamp":1681466544000},"page":"649-664","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Select, Extend, and\u00a0Generate: Generative Knowledge Selection for\u00a0Open-Domain Dialogue Response Generation"],"prefix":"10.1007","author":[{"given":"Sixing","family":"Wu","sequence":"first","affiliation":[]},{"given":"Ping","family":"Xue","sequence":"additional","affiliation":[]},{"given":"Ye","family":"Tao","sequence":"additional","affiliation":[]},{"given":"Ying","family":"Li","sequence":"additional","affiliation":[]},{"given":"Zhonghai","family":"Wu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,4,15]]},"reference":[{"key":"48_CR1","doi-asserted-by":"crossref","unstructured":"Bai, J., Yang, Z., Liang, X., Wang, W., Li, Z.: Learning to copy coherent knowledge for response generation. In: AAAI 2021 (2021)","DOI":"10.1609\/aaai.v35i14.17486"},{"key":"48_CR2","unstructured":"Brown, T.B., et al.: Language models are few-shot learners. CoRR abs\/2005.14165 (2020). https:\/\/arxiv.org\/abs\/2005.14165"},{"key":"48_CR3","doi-asserted-by":"crossref","unstructured":"Cho, K., van Merrienboer, B., Bahdanau, D., Bengio, Y.: On the properties of neural machine translation: encoder-decoder approaches. In: Wu, D., Carpuat, M., Carreras, X., Vecchi, E.M. (eds.) SSST@EMNLP 2014 (2014)","DOI":"10.3115\/v1\/W14-4012"},{"key":"48_CR4","doi-asserted-by":"crossref","unstructured":"Cui, L., Wu, Y., Liu, S., Zhang, Y.: Knowledge enhanced fine-tuning for better handling unseen entities in dialogue generation. In: EMNLP 2021, November 2021","DOI":"10.18653\/v1\/2021.emnlp-main.179"},{"key":"48_CR5","doi-asserted-by":"crossref","unstructured":"Cui, Y., Che, W., Liu, T., Qin, B., Yang, Z.: Pre-training with whole word masking for Chinese bert. IEEE\/ACM TASLP (2021)","DOI":"10.1109\/TASLP.2021.3124365"},{"key":"48_CR6","unstructured":"Devlin, J., Chang, M., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. In: NAACL-HLT 2019 (2019)"},{"key":"48_CR7","unstructured":"Dinan, E., Roller, S., Shuster, K., Fan, A., Auli, M., Weston, J.: Wizard of wikipedia: Knowledge-powered conversational agents. 
In: ICLR 2019 (2019)"},{"key":"48_CR8","doi-asserted-by":"crossref","unstructured":"Gu, X., Yoo, K.M., Ha, J.: Dialogbert: Discourse-aware response generation via learning to recover and rank utterances. In: AAAI2021 (2021)","DOI":"10.1609\/aaai.v35i14.17527"},{"key":"48_CR9","doi-asserted-by":"crossref","unstructured":"Ippolito, D., Kriz, R., Sedoc, J., Kustikova, M., Callison-Burch, C.: Comparison of diverse decoding methods from conditional language models. In: ACL 2019, July 2019","DOI":"10.18653\/v1\/P19-1365"},{"key":"48_CR10","unstructured":"Kim, B., Ahn, J., Kim, G.: Sequential latent knowledge selection for knowledge-grounded dialogue. In: ICLR 2020 (2020)"},{"key":"48_CR11","doi-asserted-by":"crossref","unstructured":"Lewis, M., et al.: BART: denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In: ACL 2020 (2020)","DOI":"10.18653\/v1\/2020.acl-main.703"},{"key":"48_CR12","doi-asserted-by":"crossref","unstructured":"Li, J., Galley, M., Brockett, C., Gao, J., Dolan, B.: A diversity-promoting objective function for neural conversation models. In: NAACL 2016, June 2016","DOI":"10.18653\/v1\/N16-1014"},{"key":"48_CR13","unstructured":"Li, J., Monroe, W., Jurafsky, D.: A simple, fast diverse decoding algorithm for neural generation. CoRR abs\/1611.08562 (2016). http:\/\/arxiv.org\/abs\/1611.08562"},{"key":"48_CR14","unstructured":"Li, J., Tang, T., Zhao, W.X., Nie, J., Wen, J.: A survey of pretrained language models based text generation. CoRR abs\/2201.05273 (2022). https:\/\/arxiv.org\/abs\/2201.05273"},{"key":"48_CR15","doi-asserted-by":"crossref","unstructured":"Li, J., Tang, T., Zhao, W.X., Wei, Z., Yuan, N.J., Wen, J.R.: Few-shot knowledge graph-to-text generation with pretrained language models. In: Findings of ACL-IJCNLP 2021 (Aug 2021)","DOI":"10.18653\/v1\/2021.findings-acl.136"},{"key":"48_CR16","doi-asserted-by":"crossref","unstructured":"Li, J., Tang, T., Zhao, W.X., Wen, J.: Pretrained language models for text generation: A survey. CoRR abs\/2105.10311 (2021). https:\/\/arxiv.org\/abs\/2105.10311","DOI":"10.24963\/ijcai.2021\/612"},{"key":"48_CR17","doi-asserted-by":"crossref","unstructured":"Liang, Y., Meng, F., Zhang, Y., Chen, Y., Xu, J., Zhou, J.: Infusing multi-source knowledge with heterogeneous graph neural network for emotional conversation generation. In: AAAI 2021 (2021)","DOI":"10.1609\/aaai.v35i15.17575"},{"key":"48_CR18","unstructured":"Lin, C.Y.: ROUGE: a package for automatic evaluation of summaries. In: Text Summarization Branches Out, pp. 74\u201381. Association for Computational Linguistics, Barcelona, Spain, July 2004"},{"key":"48_CR19","unstructured":"Lin, T., Wang, Y., Liu, X., Qiu, X.: A survey of transformers. CoRR abs\/2106.04554 (2021). https:\/\/arxiv.org\/abs\/2106.04554"},{"key":"48_CR20","doi-asserted-by":"crossref","unstructured":"Lin, X., Jian, W., He, J., Wang, T., Chu, W.: Generating informative conversational response using recurrent knowledge-interaction and knowledge-copy. In: ACL 2020 (2020)","DOI":"10.18653\/v1\/2020.acl-main.6"},{"key":"48_CR21","doi-asserted-by":"crossref","unstructured":"Liu, C.W., Lowe, R., Serban, I., Noseworthy, M., Charlin, L., Pineau, J.: How NOT to evaluate your dialogue system: An empirical study of unsupervised evaluation metrics for dialogue response generation. In: EMNLP 2016, November 2016","DOI":"10.18653\/v1\/D16-1230"},{"key":"48_CR22","unstructured":"Liu, Y., et al.: Roberta: a robustly optimized BERT pretraining approach. 
CoRR abs\/1907.11692 (2019). http:\/\/arxiv.org\/abs\/1907.11692"},{"key":"48_CR23","doi-asserted-by":"crossref","unstructured":"Lotfi, E., Bruyn, M.D., Buhmann, J., Daelemans, W.: Teach me what to say and I will learn what to pick: Unsupervised knowledge selection through response generation with pretrained generative models. CoRR abs\/2110.02067 (2021). https:\/\/arxiv.org\/abs\/2110.02067","DOI":"10.18653\/v1\/2021.nlp4convai-1.24"},{"key":"48_CR24","doi-asserted-by":"crossref","unstructured":"Luong, T., Pham, H., Manning, C.D.: Effective approaches to attention-based neural machine translation. In: EMNLP 2015 (2015)","DOI":"10.18653\/v1\/D15-1166"},{"key":"48_CR25","doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.: BLEU: a method for automatic evaluation of machine translation. In: ACL, pp. 311\u2013318. ACL (2002)","DOI":"10.3115\/1073083.1073135"},{"key":"48_CR26","doi-asserted-by":"crossref","unstructured":"Qin, L., Liu, Y., Che, W., Wen, H., Li, Y., Liu, T.: Entity-consistent end-to-end task-oriented dialogue system with KB retriever. In: Inui, K., Jiang, J., Ng, V., Wan, X. (eds.) EMNLP-IJCNLP 2019 (2019)","DOI":"10.18653\/v1\/D19-1013"},{"key":"48_CR27","unstructured":"Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., Sutskever, I.: Language models are unsupervised multitask learners (2019)"},{"key":"48_CR28","doi-asserted-by":"crossref","unstructured":"Ren, P., Chen, Z., Monz, C., Ma, J., de Rijke, M.: Thinking globally, acting locally: Distantly supervised global-to-local knowledge selection for background based conversation. In: AAAI 2020, pp. 8697\u20138704 (2020)","DOI":"10.1609\/aaai.v34i05.6395"},{"key":"48_CR29","doi-asserted-by":"crossref","unstructured":"See, A., Liu, P.J., Manning, C.D.: Get to the point: Summarization with pointer-generator networks. In: Barzilay, R., Kan, M. (eds.) ACL 2017 (2017)","DOI":"10.18653\/v1\/P17-1099"},{"key":"48_CR30","doi-asserted-by":"crossref","unstructured":"Serban, I.V., et al.: A hierarchical latent variable encoder-decoder model for generating dialogues. In: AAAI 2017 (2017)","DOI":"10.1609\/aaai.v31i1.10983"},{"key":"48_CR31","unstructured":"Shao, Y., et al.: CPT: a pre-trained unbalanced transformer for both Chinese language understanding and generation. CoRR abs\/2109.05729 (2021). https:\/\/arxiv.org\/abs\/2109.05729"},{"key":"48_CR32","doi-asserted-by":"crossref","unstructured":"Speer, R., Havasi, C.: ConceptNet 5: a large semantic network for relational knowledge. In: The People\u2019s Web Meets NLP, Collaboratively Constructed Language Resources (2013)","DOI":"10.1007\/978-3-642-35085-6_6"},{"key":"48_CR33","unstructured":"Sutskever, I., Vinyals, O., Le, Q.V.: Sequence to sequence learning with neural networks. In: Advances in Neural Information Processing Systems 27 (2014)"},{"key":"48_CR34","unstructured":"Velickovic, P., Cucurull, G., Casanova, A., Romero, A., Li\u00f2, P., Bengio, Y.: Graph attention networks. In: ICLR 2018 (2018)"},{"key":"48_CR35","unstructured":"Vinyals, O., Le, Q.V.: A neural conversational model. CoRR abs\/1506.05869 (2015). http:\/\/arxiv.org\/abs\/1506.05869"},{"key":"48_CR36","unstructured":"Wang, S., et al.: Modeling text-visual mutual dependency for multi-modal dialog generation. CoRR abs\/2105.14445 (2021). 
https:\/\/arxiv.org\/abs\/2105.14445"},{"key":"48_CR37","series-title":"Lecture Notes in Computer Science (Lecture Notes in Artificial Intelligence)","doi-asserted-by":"publisher","first-page":"91","DOI":"10.1007\/978-3-030-60450-9_8","volume-title":"Natural Language Processing and Chinese Computing","author":"Y Wang","year":"2020","unstructured":"Wang, Y., et al.: A large-scale Chinese short-text conversation dataset. In: Zhu, X., Zhang, M., Hong, Yu., He, R. (eds.) NLPCC 2020. LNCS (LNAI), vol. 12430, pp. 91\u2013103. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-60450-9_8"},{"key":"48_CR38","unstructured":"Wu, S., Li, Y., Xue, P., Zhang, D., Wu, Z.: Section-aware commonsense knowledge-grounded dialogue generation with pre-trained language model. In: COLING 2022, pp. 521\u2013531. International Committee on Computational Linguistics (2022). https:\/\/aclanthology.org\/2022.coling-1.43"},{"key":"48_CR39","doi-asserted-by":"crossref","unstructured":"Wu, S., Li, Y., Zhang, D., Wu, Z.: Improving knowledge-aware dialogue response generation by using human-written prototype dialogues. In: Cohn, T., He, Y., Liu, Y. (eds.) Findings of EMNLP 2020 (2020)","DOI":"10.18653\/v1\/2020.findings-emnlp.126"},{"key":"48_CR40","doi-asserted-by":"publisher","unstructured":"Wu, S., Li, Y., Zhang, D., Wu, Z.: Generating rational commonsense knowledge-aware dialogue responses with channel-aware knowledge fusing network. IEEE ACM Trans. Audio Speech Lang. Process. 30, 3230\u20133239 (2022). https:\/\/doi.org\/10.1109\/TASLP.2022.3199649","DOI":"10.1109\/TASLP.2022.3199649"},{"key":"48_CR41","doi-asserted-by":"crossref","unstructured":"Wu, S., Li, Y., Zhang, D., Zhou, Y., Wu, Z.: Diverse and informative dialogue generation with context-specific commonsense knowledge awareness. In: ACL 2020 (2020)","DOI":"10.18653\/v1\/2020.acl-main.515"},{"key":"48_CR42","doi-asserted-by":"crossref","unstructured":"Wu, S., Wang, M., Li, Y., Zhang, D., Wu, Z.: Improving the applicability of knowledge-enhanced dialogue generation systems by using heterogeneous knowledge from multiple sources. In: WSDM 2022 (2022)","DOI":"10.1145\/3488560.3498393"},{"key":"48_CR43","doi-asserted-by":"crossref","unstructured":"Yan, R.: \u201cChitty-chitty-chat bot\u201d: deep learning for conversational AI. In: IJCAI 2018 (2018)","DOI":"10.24963\/ijcai.2018\/778"},{"key":"48_CR44","doi-asserted-by":"crossref","unstructured":"Young, T., Cambria, E., Chaturvedi, I., Zhou, H., Biswas, S., Huang, M.: Augmenting end-to-end dialogue systems with commonsense knowledge. In: AAAI 2018 (2018)","DOI":"10.1609\/aaai.v32i1.11923"},{"key":"48_CR45","unstructured":"Yu, W., et al.: A survey of knowledge-enhanced text generation. CoRR abs\/2010.04389 (2020). https:\/\/arxiv.org\/abs\/2010.04389"},{"key":"48_CR46","doi-asserted-by":"crossref","unstructured":"Zhang, H., Liu, Z., Xiong, C., Liu, Z.: Grounded conversation generation as guided traverses in commonsense knowledge graphs. In: ACL 2020 (2020)","DOI":"10.18653\/v1\/2020.acl-main.184"},{"key":"48_CR47","doi-asserted-by":"crossref","unstructured":"Zhang, Y., et al.: DIALOGPT: large-scale generative pre-training for conversational response generation. In: Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: System Demonstrations, July 2020","DOI":"10.18653\/v1\/2020.acl-demos.30"},{"key":"48_CR48","unstructured":"Zhao, X., Wu, W., Tao, C., Xu, C., Zhao, D., Yan, R.: Low-resource knowledge-grounded dialogue generation. 
In: ICLR 2020 (2020)"},{"key":"48_CR49","doi-asserted-by":"crossref","unstructured":"Zhao, X., Wu, W., Xu, C., Tao, C., Zhao, D., Yan, R.: Knowledge-grounded dialogue generation with pre-trained language models. In: EMNLP 2020 (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.272"},{"key":"48_CR50","doi-asserted-by":"crossref","unstructured":"Zhou, H., Young, T., Huang, M., Zhao, H., Xu, J., Zhu, X.: Commonsense knowledge aware conversation generation with graph attention. In: IJCAI 2018 (2018)","DOI":"10.24963\/ijcai.2018\/643"},{"key":"48_CR51","doi-asserted-by":"crossref","unstructured":"Zhou, P., et al.: Commonsense-focused dialogues for response generation: an empirical study. In: SIGdial 2021 (2021)","DOI":"10.18653\/v1\/2021.sigdial-1.13"},{"key":"48_CR52","doi-asserted-by":"crossref","unstructured":"Zhou, P., et al.: Think before you speak: explicitly generating implicit commonsense knowledge for response generation. In: ACL 2022, May 2022","DOI":"10.18653\/v1\/2022.acl-long.88"}],"container-title":["Lecture Notes in Computer Science","Database Systems for Advanced Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-30675-4_48","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,12]],"date-time":"2024-03-12T12:15:24Z","timestamp":1710245724000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-30675-4_48"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031306747","9783031306754"],"references-count":52,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-30675-4_48","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"15 April 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"DASFAA","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Database Systems for Advanced Applications","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tianjin","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17 April 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"20 April 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"dasfaa2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"http:\/\/www.tjudb.cn\/dasfaa2023\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Microsoft CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"652","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"125","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"66","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"19% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"7.3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}