{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,22]],"date-time":"2025-11-22T11:36:25Z","timestamp":1763811385086,"version":"3.44.0"},"publisher-location":"New York, NY, USA","reference-count":29,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,12,15]],"date-time":"2023-12-15T00:00:00Z","timestamp":1702598400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"DOI":"10.13039\/501100006374","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62176023"],"award-info":[{"award-number":["62176023"]}],"id":[{"id":"10.13039\/501100006374","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,12,15]]},"DOI":"10.1145\/3639233.3639239","type":"proceedings-article","created":{"date-parts":[[2024,3,5]],"date-time":"2024-03-05T11:02:10Z","timestamp":1709636530000},"page":"153-160","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":1,"title":["CoPrompt: A Contrast-prompt Tuning Method for Multiparty Dialogue Character Relationship Extraction"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0003-4931-0755","authenticated-orcid":false,"given":"Yu","family":"Li","sequence":"first","affiliation":[{"name":"School of Computer Science, Beijing Information Science and Technology University, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0947-2640","authenticated-orcid":false,"given":"Yuru","family":"Jiang","sequence":"additional","affiliation":[{"name":"School of Computer Science, Beijing Information Science and Technology University, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-1610-1136","authenticated-orcid":false,"given":"Jie","family":"Chen","sequence":"additional","affiliation":[{"name":"School of Computer Science, Beijing Information Science and Technology University, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5364-3550","authenticated-orcid":false,"given":"Liangguo","family":"Wang","sequence":"additional","affiliation":[{"name":"School of Computer Science, Beijing Information Science and Technology University, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-9679-4982","authenticated-orcid":false,"given":"Yuyang","family":"Tao","sequence":"additional","affiliation":[{"name":"School of Computer Science, Beijing Information Science and Technology University, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0280-8455","authenticated-orcid":false,"given":"Yangsen","family":"Zhang","sequence":"additional","affiliation":[{"name":"School of Computer Science, Beijing Information Science and Technology University, China"}]}],"member":"320","published-online":{"date-parts":[[2024,3,5]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Language models are few-shot learners. Advances in neural information processing systems 33","author":"Brown Tom","year":"2020","unstructured":"Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared\u00a0D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, 2020. Language models are few-shot learners. 
Advances in neural information processing systems 33 (2020), 1877\u20131901."},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1145\/3485447.3511998"},{"key":"e_1_3_2_1_3_1","volume-title":"An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929","author":"Dosovitskiy Alexey","year":"2020","unstructured":"Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, 2020. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)."},{"key":"e_1_3_2_1_4_1","volume-title":"Cert: Contrastive self-supervised learning for language understanding. arXiv preprint arXiv:2005.12766","author":"Fang Hongchao","year":"2020","unstructured":"Hongchao Fang, Sicheng Wang, Meng Zhou, Jiayuan Ding, and Pengtao Xie. 2020. Cert: Contrastive self-supervised learning for language understanding. arXiv preprint arXiv:2005.12766 (2020)."},{"key":"e_1_3_2_1_5_1","volume-title":"Attention guided graph convolutional networks for relation extraction. arXiv preprint arXiv:1906.07510","author":"Guo Zhijiang","year":"2019","unstructured":"Zhijiang Guo, Yan Zhang, and Wei Lu. 2019. Attention guided graph convolutional networks for relation extraction. arXiv preprint arXiv:1906.07510 (2019)."},{"key":"e_1_3_2_1_6_1","volume-title":"Warp: Word-level adversarial reprogramming. arXiv preprint arXiv:2101.00121","author":"Hambardzumyan Karen","year":"2021","unstructured":"Karen Hambardzumyan, Hrant Khachatrian, and Jonathan May. 2021. Warp: Word-level adversarial reprogramming. arXiv preprint arXiv:2101.00121 (2021)."},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.aiopen.2022.11.003"},{"key":"e_1_3_2_1_8_1","volume-title":"Proceedings of the Thirteenth Language Resources and Evaluation Conference. 2337\u20132344","author":"Jiang Yuru","year":"2022","unstructured":"Yuru Jiang, Yang Xu, Yuhang Zhan, Weikai He, Yilin Wang, Zixuan Xi, Meiyun Wang, Xinyu Li, Yu Li, and Yanchao Yu. 2022. The CRECIL Corpus: a New Dataset for Extraction of Relations between Characters in Chinese Multi-party Dialogues. In Proceedings of the Thirteenth Language Resources and Evaluation Conference. 2337\u20132344."},{"key":"e_1_3_2_1_9_1","volume-title":"How can we know what language models know?Transactions of the Association for Computational Linguistics 8","author":"Jiang Zhengbao","year":"2020","unstructured":"Zhengbao Jiang, Frank\u00a0F Xu, Jun Araki, and Graham Neubig. 2020. How can we know what language models know?Transactions of the Association for Computational Linguistics 8 (2020), 423\u2013438."},{"key":"e_1_3_2_1_10_1","volume-title":"Proceedings of naacL-HLT, Vol.\u00a01. 2.","author":"Ming-Wei\u00a0Chang Jacob Devlin","year":"2019","unstructured":"Jacob Devlin Ming-Wei\u00a0Chang Kenton and Lee\u00a0Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of naacL-HLT, Vol.\u00a01. 2."},{"key":"e_1_3_2_1_11_1","volume-title":"Imagenet classification with deep convolutional neural networks. Advances in neural information processing systems 25","author":"Krizhevsky Alex","year":"2012","unstructured":"Alex Krizhevsky, Ilya Sutskever, and Geoffrey\u00a0E Hinton. 2012. Imagenet classification with deep convolutional neural networks. 
Advances in neural information processing systems 25 (2012)."},{"key":"e_1_3_2_1_12_1","volume-title":"Graph based network with contextualized representations of turns in dialogue. arXiv preprint arXiv:2109.04008","author":"Lee Bongseok","year":"2021","unstructured":"Bongseok Lee and Yong\u00a0Suk Choi. 2021. Graph based network with contextualized representations of turns in dialogue. arXiv preprint arXiv:2109.04008 (2021)."},{"key":"e_1_3_2_1_13_1","volume-title":"Prefix-tuning: Optimizing continuous prompts for generation. arXiv preprint arXiv:2101.00190","author":"Li Xiang\u00a0Lisa","year":"2021","unstructured":"Xiang\u00a0Lisa Li and Percy Liang. 2021. Prefix-tuning: Optimizing continuous prompts for generation. arXiv preprint arXiv:2101.00190 (2021)."},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1145\/3560815"},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-short.8"},{"key":"e_1_3_2_1_16_1","volume-title":"Language models as knowledge bases?arXiv preprint arXiv:1909.01066","author":"Petroni Fabio","year":"2019","unstructured":"Fabio Petroni, Tim Rockt\u00e4schel, Patrick Lewis, Anton Bakhtin, Yuxiang Wu, Alexander\u00a0H Miller, and Sebastian Riedel. 2019. Language models as knowledge bases?arXiv preprint arXiv:1909.01066 (2019)."},{"key":"e_1_3_2_1_17_1","volume-title":"Socaog: Incremental graph parsing for social relation inference in dialogues. arXiv preprint arXiv:2106.01006","author":"Qiu Liang","year":"2021","unstructured":"Liang Qiu, Yuan Liang, Yizhou Zhao, Pan Lu, Baolin Peng, Zhou Yu, Ying\u00a0Nian Wu, and Song-Chun Zhu. 2021. Socaog: Incremental graph parsing for social relation inference in dialogues. arXiv preprint arXiv:2106.01006 (2021)."},{"key":"e_1_3_2_1_18_1","volume-title":"Constrained language models yield few-shot semantic parsers. arXiv preprint arXiv:2104.08768","author":"Shin Richard","year":"2021","unstructured":"Richard Shin, Christopher\u00a0H Lin, Sam Thomson, Charles Chen, Subhro Roy, Emmanouil\u00a0Antonios Platanios, Adam Pauls, Dan Klein, Jason Eisner, and Benjamin Van\u00a0Durme. 2021. Constrained language models yield few-shot semantic parsers. arXiv preprint arXiv:2104.08768 (2021)."},{"key":"e_1_3_2_1_19_1","volume-title":"Autoprompt: Eliciting knowledge from language models with automatically generated prompts. arXiv preprint arXiv:2010.15980","author":"Shin Taylor","year":"2020","unstructured":"Taylor Shin, Yasaman Razeghi, Robert\u00a0L Logan\u00a0IV, Eric Wallace, and Sameer Singh. 2020. Autoprompt: Eliciting knowledge from language models with automatically generated prompts. arXiv preprint arXiv:2010.15980 (2020)."},{"key":"e_1_3_2_1_20_1","volume-title":"Understanding LSTM\u2013a tutorial into long short-term memory recurrent neural networks. arXiv preprint arXiv:1909.09586","author":"Staudemeyer C","year":"2019","unstructured":"Ralf\u00a0C Staudemeyer and Eric\u00a0Rothstein Morris. 2019. Understanding LSTM\u2013a tutorial into long short-term memory recurrent neural networks. arXiv preprint arXiv:1909.09586 (2019)."},{"key":"e_1_3_2_1_21_1","volume-title":"Generalizing from a few examples: A survey on few-shot learning. ACM computing surveys (csur) 53, 3","author":"Wang Yaqing","year":"2020","unstructured":"Yaqing Wang, Quanming Yao, James\u00a0T Kwok, and Lionel\u00a0M Ni. 2020. Generalizing from a few examples: A survey on few-shot learning. 
ACM computing surveys (csur) 53, 3 (2020), 1\u201334."},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i16.17670"},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747486"},{"key":"e_1_3_2_1_24_1","volume-title":"Dialogue-based relation extraction. arXiv preprint arXiv:2004.08056","author":"Yu Dian","year":"2020","unstructured":"Dian Yu, Kai Sun, Claire Cardie, and Dong Yu. 2020. Dialogue-based relation extraction. arXiv preprint arXiv:2004.08056 (2020)."},{"key":"e_1_3_2_1_25_1","volume-title":"Recurrent neural network regularization. arXiv preprint arXiv:1409.2329","author":"Zaremba Wojciech","year":"2014","unstructured":"Wojciech Zaremba, Ilya Sutskever, and Oriol Vinyals. 2014. Recurrent neural network regularization. arXiv preprint arXiv:1409.2329 (2014)."},{"key":"e_1_3_2_1_26_1","volume-title":"Graph convolution over pruned dependency trees improves relation extraction. arXiv preprint arXiv:1809.10185","author":"Zhang Yuhao","year":"2018","unstructured":"Yuhao Zhang, Peng Qi, and Christopher\u00a0D Manning. 2018. Graph convolution over pruned dependency trees improves relation extraction. arXiv preprint arXiv:1809.10185 (2018)."},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D17-1004"},{"key":"e_1_3_2_1_28_1","volume-title":"Factual probing is [mask]: Learning vs. learning to recall. arXiv preprint arXiv:2104.05240","author":"Zhong Zexuan","year":"2021","unstructured":"Zexuan Zhong, Dan Friedman, and Danqi Chen. 2021. Factual probing is [mask]: Learning vs. learning to recall. arXiv preprint arXiv:2104.05240 (2021)."},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P16-2034"}],"event":{"name":"NLPIR 2023: 2023 7th International Conference on Natural Language Processing and Information Retrieval","acronym":"NLPIR 2023","location":"Seoul Republic of Korea"},"container-title":["Proceedings of the 2023 7th International Conference on Natural Language Processing and Information Retrieval"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3639233.3639239","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3639233.3639239","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T19:56:55Z","timestamp":1755892615000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3639233.3639239"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,12,15]]},"references-count":29,"alternative-id":["10.1145\/3639233.3639239","10.1145\/3639233"],"URL":"https:\/\/doi.org\/10.1145\/3639233.3639239","relation":{},"subject":[],"published":{"date-parts":[[2023,12,15]]},"assertion":[{"value":"2024-03-05","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
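The same work object can be re-fetched programmatically from the public Crossref REST API by DOI. Below is a minimal sketch in Python; the `requests` dependency and the `mailto` contact address are assumptions (Crossref recommends, but does not require, identifying yourself via a `mailto` parameter for its polite pool).

```python
# Minimal sketch: fetch this work's Crossref record by DOI.
# Assumes the `requests` package is installed; the mailto address is a placeholder.
import requests

DOI = "10.1145/3639233.3639239"

resp = requests.get(
    f"https://api.crossref.org/works/{DOI}",
    params={"mailto": "you@example.org"},  # polite-pool identification (placeholder)
    timeout=30,
)
resp.raise_for_status()
work = resp.json()["message"]  # the "message" field holds the work metadata

print(work["title"][0])                       # article title
print(work["DOI"], work.get("page"))          # DOI and page range
print([a["family"] for a in work["author"]])  # author family names
print(work.get("references-count"))           # number of deposited references
```

The response mirrors the record above: a top-level envelope with "status", "message-type", and a "message" object carrying the bibliographic fields.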