{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T15:41:17Z","timestamp":1775144477619,"version":"3.50.1"},"reference-count":153,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"7","license":[{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62306288"],"award-info":[{"award-number":["62306288"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62271452"],"award-info":[{"award-number":["62271452"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"National Key Research and Development Program of China","award":["2022YFB4500305"],"award-info":[{"award-number":["2022YFB4500305"]}]},{"name":"Key Research Project of Zhejiang Lab","award":["2022PI0AC01"],"award-info":[{"award-number":["2022PI0AC01"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Knowl. Data Eng."],"published-print":{"date-parts":[[2024,7]]},"DOI":"10.1109\/tkde.2024.3360454","type":"journal-article","created":{"date-parts":[[2024,1,31]],"date-time":"2024-01-31T18:38:42Z","timestamp":1706726322000},"page":"3091-3110","source":"Crossref","is-referenced-by-count":138,"title":["Give us the Facts: Enhancing Large Language Models With Knowledge Graphs for Fact-Aware Language Modeling"],"prefix":"10.1109","volume":"36","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0826-9453","authenticated-orcid":false,"given":"Linyao","family":"Yang","sequence":"first","affiliation":[{"name":"Zhejiang Lab, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7626-0162","authenticated-orcid":false,"given":"Hongyang","family":"Chen","sequence":"additional","affiliation":[{"name":"Zhejiang Lab, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5056-0351","authenticated-orcid":false,"given":"Zhao","family":"Li","sequence":"additional","affiliation":[{"name":"Zhejiang Lab, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5838-0320","authenticated-orcid":false,"given":"Xiao","family":"Ding","sequence":"additional","affiliation":[{"name":"Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, Harbin, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2396-1704","authenticated-orcid":false,"given":"Xindong","family":"Wu","sequence":"additional","affiliation":[{"name":"Zhejiang Lab, Hangzhou, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2020.2994641"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2022.3153651"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2018.2866863"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1810.04805"},{"key":"ref5","first-page":"1","article-title":"Improving language understanding by generative pre-training","author":"Radford","year":"2018"},{"issue":"140","key":"ref6","first-page":"1","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"J. Mach. Learn. Res."},{"key":"ref7","article-title":"Emergent abilities of large language models","author":"Wei","year":"2022"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1250"},{"key":"ref9","article-title":"Language models are open knowledge graphs","author":"Wang","year":"2020"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.eacl-main.153"},{"key":"ref11","article-title":"Chatgpt is a knowledgeable but inexperienced solver: An investigation of commonsense problem in large language models","author":"Bian","year":"2023"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.251"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.146"},{"key":"ref14","article-title":"Evaluating the logical reasoning ability of chatgpt and GPT-4","author":"Liu","year":"2023"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.ijcnlp-main.45"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1145\/3477314.3507066"},{"key":"ref17","article-title":"Knowledge enhanced pretrained language models: A compreshensive survey","author":"Wei","year":"2021"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/tkde.2023.3310002"},{"key":"ref19","article-title":"A survey on knowledge-enhanced pre-trained language models","author":"Zhen","year":"2022"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2021.3090866"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1016\/j.eng.2022.04.024"},{"key":"ref22","first-page":"1","article-title":"ELECTRA: Pre-training text encoders as discriminators rather than generators","volume-title":"Proc. 8th Int. Conf. Learn. Representations","author":"Clark"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00300"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.132"},{"key":"ref25","article-title":"Albert: A lite bert for self-supervised learning of language representations","author":"Lan","year":"2019"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.703"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N18-1202"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref29","article-title":"RoBERTa: A robustly optimized bert pretraining approach","author":"Liu","year":"2019"},{"key":"ref30","article-title":"DistilBERT, a distilled version of bert: Smaller, faster, cheaper and lighter","author":"Sanh","year":"2019"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1139"},{"key":"ref32","first-page":"1","article-title":"DeBERTa: Decoding-enhanced BERT with disentangled attention","volume-title":"Proc. Int. Conf. Learn. Representations","author":"He"},{"issue":"8","key":"ref33","first-page":"1","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI Blog"},{"key":"ref34","first-page":"5753","article-title":"XLNet: Generalized autoregressive pretraining for language understanding","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Yang"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.722"},{"issue":"1","key":"ref36","first-page":"5232","article-title":"Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity","volume":"23","author":"Fedus","year":"2022","journal-title":"J. Mach. Learn. Res."},{"key":"ref37","article-title":"Scaling instruction-finetuned language models","author":"Chung","year":"2022"},{"key":"ref38","article-title":"GLM-130B: An open bilingual pre-trained model","author":"Zeng","year":"2022"},{"key":"ref39","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Adv. Neural Inform. Process. Syst.","author":"Brown"},{"key":"ref40","first-page":"5547","article-title":"GLaM: Efficient scaling of language models with mixture-of-experts","volume-title":"Proc. 39th Int. Conf. Mach. Learn.","author":"Du"},{"key":"ref41","first-page":"27730","article-title":"Training language models to follow instructions with human feedback","volume-title":"Proc. Adv. Neural Inform. Process. Syst.","author":"Ouyang"},{"key":"ref42","article-title":"PaLM: Scaling language modeling with pathways","author":"Chowdhery","year":"2022"},{"key":"ref43","article-title":"LaMDA: Language models for dialog applications","author":"Thoppilan","year":"2022"},{"key":"ref44","article-title":"OPT: Open pre-trained transformer language models","author":"Zhang","year":"2022"},{"key":"ref45","article-title":"LLaMA: Open and efficient foundation language models","author":"Touvron","year":"2023"},{"key":"ref46","article-title":"Stanford alpaca: An instruction-following llama model","author":"Taori","year":"2023"},{"key":"ref47","article-title":"GPT-4 technical report","year":"2023"},{"key":"ref48","article-title":"Llama 2: Open foundation and fine-tuned chat models","author":"Touvron","year":"2023"},{"key":"ref49","article-title":"A survey of large language models","author":"Zhao","year":"2023"},{"issue":"4","key":"ref50","first-page":"705","article-title":"The chatGPT after: Opportunities and challenges of very large scale pre-trained models","volume":"49","author":"Lu","year":"2023","journal-title":"Acta Autom. Sin."},{"key":"ref51","first-page":"4299","article-title":"Deep reinforcement learning from human preferences","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Christiano"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1145\/3560815"},{"key":"ref53","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017"},{"key":"ref54","article-title":"Chain of thought prompting elicits reasoning in large language models","author":"Wei","year":"2022"},{"key":"ref55","article-title":"Sparks of artificial general intelligence: Early experiments with GPT-4","author":"Bubeck","year":"2023"},{"key":"ref56","article-title":"A survey for in-context learning","author":"Dong","year":"2022"},{"key":"ref57","article-title":"FineTuned language models are zero-shot learners","author":"Wei","year":"2021"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1145\/3580305.3599208"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.296"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1145\/3512467"},{"key":"ref61","article-title":"Chain of knowledge: A framework for grounding large language models with structured knowledge bases","author":"Li","year":"2023"},{"key":"ref62","article-title":"Measuring causal effects of data statistics on language models \u2018factual\u2019 predictions","author":"Yanai","year":"2022"},{"key":"ref63","article-title":"Think-on-graph: Deep and responsible reasoning of large language model with knowledge graph","author":"Sun","year":"2023"},{"key":"ref64","article-title":"Language models are greedy reasoners: A systematic formal analysis of chain-of-thought","author":"Abulhair","year":"2022"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1162\/coli_a_00492"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/taslp.2023.3325973"},{"key":"ref67","article-title":"GreaseLM: Graph reasoning enhanced language models for question answering","author":"Zhang","year":"2022"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-acl.24"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.nlrse-1.7"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.findings-emnlp.301"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.naacl-main.386"},{"key":"ref72","article-title":"Interpreting language models through knowledge graph extraction","author":"Swamy","year":"2021"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i03.5681"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.coling-main.327"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.semeval-1.60"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.523"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.findings-emnlp.71"},{"key":"ref78","article-title":"Knowledge-aware language model pretraining","author":"Rosset","year":"2020"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1145\/3534678.3539210"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i10.21425"},{"key":"ref81","article-title":"Align, mask and select: A simple method for incorporating commonsense knowledge into language representation models","author":"Ye","year":"2019"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.697"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1598"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33013027"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00360"},{"key":"ref86","article-title":"Pretrained Encyclopedia: Weakly supervised knowledge-pretrained language model","author":"Xiong","year":"2019"},{"key":"ref87","article-title":"ERNIE 3.0: Large-scale knowledge enhanced pre-training for language understanding and generation","author":"Sun","year":"2021"},{"key":"ref88","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.findings-emnlp.207"},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.1016\/j.aiopen.2021.06.004"},{"key":"ref90","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.naacl-main.372"},{"key":"ref91","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/d19-1016"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00476"},{"key":"ref93","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.naacl-main.45"},{"key":"ref94","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.findings-emnlp.384"},{"key":"ref95","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1005"},{"key":"ref96","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i10.21417"},{"key":"ref97","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i7.16796"},{"key":"ref98","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.findings-acl.121"},{"key":"ref99","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.deelio-1.5"},{"key":"ref100","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.findings-emnlp.325"},{"key":"ref101","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2023.03.002"},{"key":"ref102","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.423"},{"key":"ref103","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.260"},{"key":"ref104","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.567"},{"key":"ref105","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i05.6428"},{"key":"ref106","first-page":"37309","article-title":"Deep bidirectional language-knowledge graph pretraining","volume-title":"Proc. Int. Conf. Neural Inform. Process. Syst.","author":"Yasunaga"},{"key":"ref107","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i05.6298"},{"key":"ref108","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.naacl-main.379"},{"key":"ref109","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2022.109460"},{"key":"ref110","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1282"},{"key":"ref111","doi-asserted-by":"publisher","DOI":"10.1093\/bib\/bbaa110"},{"key":"ref112","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.deelio-1.9"},{"key":"ref113","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i10.21286"},{"key":"ref114","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.207"},{"key":"ref115","first-page":"2787","article-title":"Translating embeddings for modeling multi-relational data","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Bordes"},{"key":"ref116","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.eacl-main.217"},{"key":"ref117","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i16.29721"},{"key":"ref118","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.eacl-main.20"},{"key":"ref119","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i14.17490"},{"key":"ref120","article-title":"End-to-end named entity recognition and relation extraction using pre-trained language models","author":"Giorgi","year":"2019"},{"key":"ref121","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.bionlp-1.20"},{"key":"ref122","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-2039"},{"key":"ref123","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i05.6299"},{"key":"ref124","doi-asserted-by":"publisher","DOI":"10.1145\/3578741.3578781"},{"key":"ref125","doi-asserted-by":"publisher","DOI":"10.1145\/3511808.3557459"},{"key":"ref126","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.435"},{"key":"ref127","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.coling-main.49"},{"key":"ref128","doi-asserted-by":"publisher","DOI":"10.1109\/tkde.2023.3301884"},{"key":"ref129","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2021.3110898"},{"key":"ref130","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2022.09.020"},{"key":"ref131","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2021.3108224"},{"key":"ref132","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2018.2879863"},{"key":"ref133","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.650"},{"key":"ref134","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00302"},{"key":"ref135","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.54"},{"key":"ref136","first-page":"1","article-title":"Creative storytelling with language models and knowledge graphs","volume-title":"Proc. CIKM Workshops Co-Located 29th ACM Int. Conf. Inf. Knowl. Manage.","author":"Yang"},{"key":"ref137","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.403"},{"key":"ref138","article-title":"A review on language models as knowledge bases","author":"AlKhamissi","year":"2022"},{"key":"ref139","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00342"},{"key":"ref140","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1393"},{"key":"ref141","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.388"},{"key":"ref142","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-emnlp.147"},{"key":"ref143","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.eacl-main.284"},{"key":"ref144","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1493"},{"key":"ref145","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.398"},{"key":"ref146","article-title":"Large language models struggle to learn long-tail knowledge","author":"Kandpal","year":"2022"},{"key":"ref147","article-title":"BertNet: Harvesting knowledge graphs from pretrained language models","author":"Hao","year":"2022"},{"key":"ref148","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1470"},{"key":"ref149","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00605"},{"key":"ref150","article-title":"PaLM-E: An embodied multimodal language model","author":"Driess","year":"2023"},{"key":"ref151","article-title":"Understanding the integration of knowledge in language models with graph convolutions","author":"Hou","year":"2022"},{"key":"ref152","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.blackboxnlp-1.3"},{"key":"ref153","article-title":"LMExplainer: A knowledge-enhanced explainer for language models","author":"Chen","year":"2023"}],"container-title":["IEEE Transactions on Knowledge and Data Engineering"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/69\/10549876\/10417790.pdf?arnumber=10417790","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,6,26]],"date-time":"2024-06-26T13:21:54Z","timestamp":1719408114000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10417790\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,7]]},"references-count":153,"journal-issue":{"issue":"7"},"URL":"https:\/\/doi.org\/10.1109\/tkde.2024.3360454","relation":{},"ISSN":["1041-4347","1558-2191","2326-3865"],"issn-type":[{"value":"1041-4347","type":"print"},{"value":"1558-2191","type":"electronic"},{"value":"2326-3865","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,7]]}}}