{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,25]],"date-time":"2025-03-25T23:55:51Z","timestamp":1742946951664,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":15,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789811982996"},{"type":"electronic","value":"9789811983009"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-981-19-8300-9_25","type":"book-chapter","created":{"date-parts":[[2022,12,1]],"date-time":"2022-12-01T14:22:49Z","timestamp":1669904569000},"page":"228-235","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["The Method for Plausibility Evaluation of Knowledge Triple Based on QA"],"prefix":"10.1007","author":[{"given":"Shutong","family":"Jia","sequence":"first","affiliation":[]},{"given":"Jiuxin","family":"Cao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,12,2]]},"reference":[{"key":"25_CR1","doi-asserted-by":"crossref","unstructured":"Wang, X., et al.: KEPLER: a unified model for knowledge embedding and pre-trained language representation. Trans. Assoc. Comput. Linguist. 9, 176\u2013194 (2021)","DOI":"10.1162\/tacl_a_00360"},{"key":"25_CR2","unstructured":"Zhang, S., et al.: Quaternion knowledge graph embeddings. Adv. Neural Inform. Process. 
Syst. 32 (2019)"},{"key":"25_CR3","unstructured":"Lee, K., Devlin, J., Chang, M.-W., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. In: Proceedings of NAACL-HLT (2019)"},{"key":"25_CR4","unstructured":"Liu, Y., et al.: Roberta: a robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692 (2019)"},{"key":"25_CR5","unstructured":"Sun, Y., et al.: Ernie: enhanced representation through knowledge integration. arXiv preprint arXiv:1904.09223 (2019)"},{"key":"25_CR6","unstructured":"Yao, L., Chengsheng M., Yuan, L.: KG-BERT: BERT for knowledge graph completion. arXiv preprint arXiv:1909.03193 (2019)"},{"key":"25_CR7","unstructured":"Antoine, B., et al.: Translating embeddings for modeling multi-relational data. Adv. Neural Inform. Process. Syst. 26 (2013)"},{"key":"25_CR8","doi-asserted-by":"crossref","unstructured":"Wang, Z., et al.: Knowledge graph embedding by translating on hyperplanes. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 28, no. 1 (2014)","DOI":"10.1609\/aaai.v28i1.8870"},{"key":"25_CR9","unstructured":"Sun, Z., et al.: RotatE: knowledge graph embedding by relational rotation in complex space. In: International Conference on Learning Representations (2018)"},{"key":"25_CR10","doi-asserted-by":"crossref","unstructured":"Wang, B., et al.: Structure-augmented text representation learning for efficient knowledge graph completion. In: Proceedings of the Web Conference 2021 (2021)","DOI":"10.1145\/3442381.3450043"},{"key":"25_CR11","unstructured":"Nils, R., Gurevych, I.: Sentence-BERT: sentence embeddings using siamese BERT-networks. In: Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP) (2019)"},{"key":"25_CR12","doi-asserted-by":"crossref","unstructured":"Lv, X., et al.: Do Pre-trained Models Benefit Knowledge Graph Completion? 
A Reliable Evaluation and a Reasonable Approach. Findings of the Association for Computational Linguistics: ACL 2022 (2022)","DOI":"10.18653\/v1\/2022.findings-acl.282"},{"key":"25_CR13","doi-asserted-by":"crossref","unstructured":"Cui, Y., et al.: Pre-training with whole word masking for chinese BERT. IEEE\/ACM Trans. Audio Speech Lang. Process. 29, 3504\u20133514 (2021)","DOI":"10.1109\/TASLP.2021.3124365"},{"key":"25_CR14","doi-asserted-by":"crossref","unstructured":"Xiao, D., et al.: ERNIE-gram: pre-training with explicitly N-gram masked language modeling for natural language understanding. In: Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (2021)","DOI":"10.18653\/v1\/2021.naacl-main.136"},{"key":"25_CR15","unstructured":"Diederik, P.K., Ba, J.: Adam: A Method for Stochastic Optimization. ICLR (Poster) (2015)"}],"container-title":["Communications in Computer and Information Science","CCKS 2022 - Evaluation Track"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-19-8300-9_25","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,12,1]],"date-time":"2022-12-01T14:28:41Z","timestamp":1669904921000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-19-8300-9_25"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9789811982996","9789811983009"],"references-count":15,"URL":"https:\/\/doi.org\/10.1007\/978-981-19-8300-9_25","relation":{},"ISSN":["1865-0929","1865-0937"],"issn-type":[{"type":"print","value":"1865-0929"},{"type":"electronic","value":"1865-0937"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"2 December 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter 
History"}},{"value":"CCKS","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China Conference on Knowledge Graph and Semantic Computing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Qinhuangdao","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"24 August 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 August 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"7","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ccks2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/sigkg.cn\/ccks2022\/en\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Easychair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the 
conference organizers)"}},{"value":"42","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"25","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"60% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"4.5","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Evaluation Track: 25 papers accepted","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review 
Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}