{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,25]],"date-time":"2025-10-25T12:47:25Z","timestamp":1761396445826,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":58,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789811989902"},{"type":"electronic","value":"9789811989919"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-981-19-8991-9_26","type":"book-chapter","created":{"date-parts":[[2023,1,18]],"date-time":"2023-01-18T08:04:02Z","timestamp":1674029042000},"page":"369-384","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["PoetryBERT: Pre-training with\u00a0Sememe Knowledge for\u00a0Classical Chinese Poetry"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-3754-2201","authenticated-orcid":false,"given":"Jiaqi","family":"Zhao","sequence":"first","affiliation":[]},{"given":"Ting","family":"Bai","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3971-708X","authenticated-orcid":false,"given":"Yuting","family":"Wei","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7112-126X","authenticated-orcid":false,"given":"Bin","family":"Wu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,1,19]]},"reference":[{"key":"26_CR1","doi-asserted-by":"crossref","unstructured":"Alsentzer, E., et al.: Publicly available clinical BERT embeddings. In: Proceedings of the 2nd Clinical Natural Language Processing Workshop, pp. 72\u201378 (2019)","DOI":"10.18653\/v1\/W19-1909"},{"key":"26_CR2","doi-asserted-by":"crossref","unstructured":"Beltagy, I., Lo, K., Cohan, A.: SciBERT: a pretrained language model for scientific text. In: Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 3615\u20133620 (2019)","DOI":"10.18653\/v1\/D19-1371"},{"key":"26_CR3","unstructured":"Bordes, A., Usunier, N., Garcia-Duran, A., Weston, J., Yakhnenko, O.: Translating embeddings for modeling multi-relational data. In: Advances in Neural Information Processing Systems, vol. 26 (2013)"},{"key":"26_CR4","unstructured":"Brown, T.B., et al.: Language models are few-shot learners. arXiv preprint arXiv:2005.14165 (2020)"},{"key":"26_CR5","doi-asserted-by":"crossref","unstructured":"Chen, C., et al.: bert2BERT: towards reusable pretrained language models. In: Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2134\u20132148 (2022)","DOI":"10.18653\/v1\/2022.acl-long.151"},{"key":"26_CR6","doi-asserted-by":"crossref","unstructured":"Chen, X., et al.: Knowprompt: knowledge-aware prompt-tuning with synergistic optimization for relation extraction. 
In: Proceedings of the ACM Web Conference 2022, pp. 2778\u20132788 (2022)","DOI":"10.1145\/3485447.3511998"},{"key":"26_CR7","doi-asserted-by":"crossref","unstructured":"Cui, G., Hu, S., Ding, N., Huang, L., Liu, Z.: Prototypical verbalizer for prompt-based few-shot tuning. arXiv preprint arXiv:2203.09770 (2022)","DOI":"10.18653\/v1\/2022.acl-long.483"},{"key":"26_CR8","unstructured":"Cui, Y., et al.: Pre-training with whole word masking for Chinese BERT. arXiv preprint arXiv:1906.08101 (2019)"},{"key":"26_CR9","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. In: Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 4171\u20134186 (2019)"},{"key":"26_CR10","unstructured":"Dong, L., et al.: Unified language model pre-training for natural language understanding and generation. In: Proceedings of the 33rd International Conference on Neural Information Processing Systems (2019)"},{"key":"26_CR11","unstructured":"Dong, Z., Dong, Q., Hao, C.: HowNet and its computation of meaning. In: COLING 2010: Demonstrations, pp. 53\u201356 (2010)"},{"key":"26_CR12","unstructured":"Du, J., Qi, F., Sun, M., Liu, Z.: Lexical sememe prediction by dictionary definitions and local semantic correspondence. J. Chin. Inf. Process. 34, 1\u20139 (2020)"},{"key":"26_CR13","doi-asserted-by":"crossref","unstructured":"Ghazvininejad, M., Choi, Y., Knight, K.: Neural poetry translation. In: Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pp. 67\u201371 (2018)","DOI":"10.18653\/v1\/N18-2011"},{"key":"26_CR14","doi-asserted-by":"crossref","unstructured":"Gu, Y., Han, X., Liu, Z., Huang, M.: PPT: pre-trained prompt tuning for few-shot learning. arXiv preprint arXiv:2109.04332 (2021)","DOI":"10.18653\/v1\/2022.acl-long.576"},{"key":"26_CR15","doi-asserted-by":"crossref","unstructured":"Guan, J., Huang, F., Zhao, Z., Zhu, X., Huang, M.: A knowledge-enhanced pretraining model for commonsense story generation. Trans. Assoc. Comput. Linguist. 8, 93\u2013108 (2020)","DOI":"10.1162\/tacl_a_00302"},{"key":"26_CR16","doi-asserted-by":"crossref","unstructured":"He, B., et al.: BERT-MK: integrating graph contextualized knowledge into pre-trained language models. In: Findings of the Association for Computational Linguistics: EMNLP 2020, pp. 2281\u20132290 (2020)","DOI":"10.18653\/v1\/2020.findings-emnlp.207"},{"key":"26_CR17","doi-asserted-by":"crossref","unstructured":"He, L., Zheng, S., Yang, T., Zhang, F.: KLMo: knowledge graph enhanced pretrained language model with fine-grained relationships. In: Findings of the Association for Computational Linguistics: EMNLP 2021, pp. 4536\u20134542 (2021)","DOI":"10.18653\/v1\/2021.findings-emnlp.384"},{"key":"26_CR18","doi-asserted-by":"crossref","unstructured":"Hong, L., Hou, W., Zhou, L.: KnowPoetry: a knowledge service platform for Tang poetry research based on domain-specific knowledge graph. In: Library Trends, vol. 69, pp. 101\u2013124 (2020)","DOI":"10.1353\/lib.2020.0025"},{"key":"26_CR19","doi-asserted-by":"crossref","unstructured":"Hsu, C.J., Lee, H.Y., Tsao, Y.: XdBERT: distilling visual information to BERT from cross-modal systems to improve language understanding. 
arXiv preprint arXiv:2204.07316 (2022)","DOI":"10.18653\/v1\/2022.acl-short.52"},{"key":"26_CR20","unstructured":"Hu, R., Li, K., Zhu, Y.: Knowledge representation and sentence segmentation of ancient Chinese based on deep language models. J. Chin. Inf. Sci. 35, 8 (2021)"},{"key":"26_CR21","doi-asserted-by":"crossref","unstructured":"Joshi, M., Chen, D., Liu, Y., Weld, D.S., Zettlemoyer, L., Levy, O.: SpanBERT: improving pre-training by representing and predicting spans. Trans. Assoc. Comput. Linguist. 8, 64\u201377 (2020)","DOI":"10.1162\/tacl_a_00300"},{"key":"26_CR22","doi-asserted-by":"crossref","unstructured":"Ke, P., Ji, H., Liu, S., Zhu, X., Huang, M.: SentiLARE: sentiment-aware language representation learning with linguistic knowledge. In: Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 6975\u20136988 (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.567"},{"key":"26_CR23","doi-asserted-by":"crossref","unstructured":"Lai, S., Xu, L., Liu, K., Zhao, J.: Recurrent convolutional neural networks for text classification. In: Proceedings of the AAAI Conference on Artificial Intelligence, pp. 2267\u20132273 (2015)","DOI":"10.1609\/aaai.v29i1.9513"},{"key":"26_CR24","unstructured":"Lample, G., Conneau, A.: Cross-lingual language model pretraining. In: Proceedings of the Advances in Neural Information Processing Systems, pp. 7057\u20137067 (2019)"},{"key":"26_CR25","unstructured":"Lan, Z., Chen, M., Goodman, S., Gimpel, K., Sharma, P., Soricut, R.: ALBERT: a lite BERT for self-supervised learning of language representations. In: Proceedings of the International Conference on Learning Representations (2020)"},{"key":"26_CR26","doi-asserted-by":"crossref","unstructured":"Lauscher, A., Vulic, I., Ponti, E.M., Korhonen, A., Glavas, G.: Specializing unsupervised pretraining models for word-level semantic similarity. In: Proceedings of the 28th International Conference on Computational Linguistics, pp. 1371\u20131383 (2020)","DOI":"10.18653\/v1\/2020.coling-main.118"},{"key":"26_CR27","doi-asserted-by":"crossref","unstructured":"Lee, J., et al.: BioBERT: a pre-trained biomedical language representation model for biomedical text mining. Bioinformatics 36, 1234\u20131240 (2019)","DOI":"10.1093\/bioinformatics\/btz682"},{"key":"26_CR28","unstructured":"Li, W., Qi, F., Sun, M., Yi, X., Zhang, J.: CCPM: a Chinese classical poetry matching dataset. arXiv preprint arXiv:2106.01979 (2021)"},{"key":"26_CR29","doi-asserted-by":"crossref","unstructured":"Lin, Z., et al.: Pre-training multilingual neural machine translation by leveraging alignment information. In: Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 2649\u20132663 (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.210"},{"key":"26_CR30","doi-asserted-by":"crossref","unstructured":"Liu, D., Yang, K., Qu, Q., Lv, J.: Ancient-modern Chinese translation with a new large training dataset. In: ACM Transactions on Asian and Low-Resource Language Information Processing, vol. 19, pp. 1\u201313 (2019)","DOI":"10.1145\/3325887"},{"key":"26_CR31","unstructured":"Liu, P., Yuan, W., Fu, J., Jiang, Z., Hayashi, H., Neubig, G.: Pre-train, prompt, and predict: a systematic survey of prompting methods in natural language processing. arXiv preprint arXiv:2107.13586 (2021)"},{"key":"26_CR32","doi-asserted-by":"crossref","unstructured":"Liu, W., et al.: K-BERT: enabling language representation with knowledge graph. 
In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, pp. 2901\u20132908 (2020)","DOI":"10.1609\/aaai.v34i03.5681"},{"key":"26_CR33","unstructured":"Liu, Y., et al.: RoBERTa: a robustly optimized BERT pretraining approach. arXiv preprint arXiv:1907.11692 (2019)"},{"key":"26_CR34","unstructured":"Liu, Y., Wu, B., Bai, T.: The construction and analysis of the knowledge graph of classical Chinese poetry. In: Computer Research and Development, vol. 57, p. 1252 (2020)"},{"key":"26_CR35","unstructured":"Liu, Y., Wu, B., Xie, T., Wang, B.: New word detection in ancient Chinese corpus. J. Chin. Inf. Process. 33, 46\u201355 (2019)"},{"key":"26_CR36","unstructured":"Mahabadi, R.K., et al.: PERFECT: prompt-free and efficient few-shot learning with language models. arXiv preprint arXiv:2204.01172 (2022)"},{"key":"26_CR37","doi-asserted-by":"crossref","unstructured":"Peters, M.E., et al.: Knowledge enhanced contextual word representations. In: Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 43\u201354 (2019)","DOI":"10.18653\/v1\/D19-1005"},{"key":"26_CR38","doi-asserted-by":"crossref","unstructured":"Qi, F., Lv, C., Liu, Z., Meng, X., Sun, M., Zheng, H.T.: Sememe prediction for BabelNet synsets using multilingual and multimodal information. arXiv preprint arXiv:2203.07426 (2022)","DOI":"10.18653\/v1\/2022.findings-acl.15"},{"key":"26_CR39","doi-asserted-by":"crossref","unstructured":"Qi, F., Yang, Y., Yi, J., Cheng, Z., Liu, Z., Sun, M.: QuoteR: a benchmark of quote recommendation for writing. arXiv preprint arXiv:2202.13145 (2022)","DOI":"10.18653\/v1\/2022.acl-long.27"},{"key":"26_CR40","doi-asserted-by":"crossref","unstructured":"Qin, Y., et al.: ERICA: improving entity and relation understanding for pre-trained language models via contrastive learning. In: Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics, pp. 3350\u20133363 (2021)","DOI":"10.18653\/v1\/2021.acl-long.260"},{"key":"26_CR41","unstructured":"Radford, A., Narasimhan, K., Salimans, T., Sutskever, I.: Improving language understanding by generative pre-training. OpenAI Blog (2018)"},{"key":"26_CR42","doi-asserted-by":"crossref","unstructured":"Shen, T., Mao, Y., He, P., Long, G., Trischler, A., Chen, W.: Exploiting structured knowledge in text via graph-guided representation learning. In: Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 8980\u20138994 (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.722"},{"key":"26_CR43","unstructured":"Song, K., Tan, X., Qin, T., Lu, J., Liu, T.Y.: MASS: masked sequence to sequence pre-training for language generation. In: Proceedings of the 36th International Conference on Machine Learning, vol. 97, pp. 5926\u20135936 (2019)"},{"key":"26_CR44","doi-asserted-by":"crossref","unstructured":"Sun, T., Shao, Y., Qiu, X., Guo, Q., Hu, Y., Huang, X., Zhang, Z.: CoLAKE: contextualized language and knowledge embedding. In: Proceedings of the 28th International Conference on Computational Linguistics, pp. 3660\u20133670 (2020)","DOI":"10.18653\/v1\/2020.coling-main.327"},{"key":"26_CR45","unstructured":"Sun, Y., et al.: ERNIE: enhanced representation through knowledge integration. arXiv preprint arXiv:1904.09223 (2019)"},{"key":"26_CR46","doi-asserted-by":"crossref","unstructured":"Sun, Z., et al.: ChineseBERT: Chinese pretraining enhanced by glyph and Pinyin information. 
In: Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics, pp. 2065\u20132075 (2021)","DOI":"10.18653\/v1\/2021.acl-long.161"},{"key":"26_CR47","doi-asserted-by":"crossref","unstructured":"Tian, H., Yang, K., Liu, D., Lv, J.: AnchiBERT: a pre-trained model for ancient Chinese language understanding and generation. In: 2021 International Joint Conference on Neural Networks (IJCNN), pp. 1\u20138 (2021)","DOI":"10.1109\/IJCNN52387.2021.9534342"},{"key":"26_CR48","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in Neural Information Processing Systems, vol. 30 (2017)"},{"key":"26_CR49","doi-asserted-by":"crossref","unstructured":"Wang, X., et al.: KEPLER: a unified model for knowledge embedding and pre-trained language representation. Trans. Assoc. Comput. Linguist. 9, 176\u2013194 (2021)","DOI":"10.1162\/tacl_a_00360"},{"key":"26_CR50","doi-asserted-by":"crossref","unstructured":"Wei, Y., Wang, H., Zhao, J., Liu, Y., Zhang, Y., Wu, B.: Gelaigelai: a visual platform for analysis of classical Chinese poetry based on knowledge graph. In: 2020 IEEE International Conference on Knowledge Graph (ICKG), pp. 513\u2013520 (2020)","DOI":"10.1109\/ICBK50248.2020.00078"},{"key":"26_CR51","unstructured":"Xiong, W., Du, J., Wang, W.Y., Stoyanov, V.: Pretrained encyclopedia: weakly supervised knowledge-pretrained language model. In: International Conference on Learning Representations (2020)"},{"key":"26_CR52","doi-asserted-by":"crossref","unstructured":"Yang, K., Liu, D., Qu, Q., Sang, Y., Lv, J.: An automatic evaluation metric for ancient-modern Chinese translation. Neural Comput. Appl. 33, 3855\u20133867 (2021)","DOI":"10.1007\/s00521-020-05216-8"},{"key":"26_CR53","doi-asserted-by":"crossref","unstructured":"Yang, Z., et al.: Generating classical Chinese poems from vernacular Chinese. In: Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing, p. 6155 (2019)","DOI":"10.18653\/v1\/D19-1637"},{"key":"26_CR54","unstructured":"Yang, Z., Dai, Z., Yang, Y., Carbonell, J., Salakhutdinov, R.R., Le, Q.V.: XLNet: generalized autoregressive pretraining for language understanding. In: Proceedings of the Advances in Neural Information Processing Systems, vol. 32 (2019)"},{"key":"26_CR55","doi-asserted-by":"crossref","unstructured":"Yi, X., Li, R., Yang, C., Li, W., Sun, M.: MixPoet: diverse poetry generation via learning controllable mixed latent space. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, pp. 9450\u20139457 (2020)","DOI":"10.1609\/aaai.v34i05.6488"},{"key":"26_CR56","unstructured":"Yu, D., Zhu, C., Yang, Y., Zeng, M.: JAKET: joint pre-training of knowledge graph and language understanding. arXiv preprint arXiv:2010.00796 (2020)"},{"key":"26_CR57","doi-asserted-by":"crossref","unstructured":"Zhang, Z., Han, X., Liu, Z., Jiang, X., Sun, M., Liu, Q.: ERNIE: enhanced language representation with informative entities. In: Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 1441\u20131451 (2019)","DOI":"10.18653\/v1\/P19-1139"},{"key":"26_CR58","doi-asserted-by":"crossref","unstructured":"Zhipeng, G., et al.: Jiuge: a human-machine collaborative Chinese classical poetry generation system. In: Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics: System Demonstrations, pp. 
25\u201330 (2019)","DOI":"10.18653\/v1\/P19-3005"}],"container-title":["Communications in Computer and Information Science","Data Mining and Big Data"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-19-8991-9_26","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,1,18]],"date-time":"2023-01-18T08:15:46Z","timestamp":1674029746000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-19-8991-9_26"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9789811989902","9789811989919"],"references-count":58,"URL":"https:\/\/doi.org\/10.1007\/978-981-19-8991-9_26","relation":{},"ISSN":["1865-0929","1865-0937"],"issn-type":[{"type":"print","value":"1865-0929"},{"type":"electronic","value":"1865-0937"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"19 January 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"DMBD","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Data Mining and Big Data","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Beijing","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"21 November 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"24 November 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"dmbd2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"EasyChair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"135","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"62","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"46% - The value is computed by the 
equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.8","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2-3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}