{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,15]],"date-time":"2026-04-15T03:13:51Z","timestamp":1776222831349,"version":"3.50.1"},"reference-count":210,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"12","license":[{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"am","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Knowl. 
Data Eng."],"published-print":{"date-parts":[[2024,12]]},"DOI":"10.1109\/tkde.2024.3469578","type":"journal-article","created":{"date-parts":[[2024,9,27]],"date-time":"2024-09-27T18:38:05Z","timestamp":1727462285000},"page":"8622-8642","source":"Crossref","is-referenced-by-count":70,"title":["Large Language Models on Graphs: A Comprehensive Survey"],"prefix":"10.1109","volume":"36","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1295-2829","authenticated-orcid":false,"given":"Bowen","family":"Jin","sequence":"first","affiliation":[{"name":"University of Illinois at Urbana-Champaign, Champaign, IL, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4204-731X","authenticated-orcid":false,"given":"Gang","family":"Liu","sequence":"additional","affiliation":[{"name":"University of Notre Dame, Notre Dame, IN, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6235-5841","authenticated-orcid":false,"given":"Chi","family":"Han","sequence":"additional","affiliation":[{"name":"University of Illinois at Urbana-Champaign, Champaign, IL, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3009-519X","authenticated-orcid":false,"given":"Meng","family":"Jiang","sequence":"additional","affiliation":[{"name":"University of Notre Dame, Notre Dame, IN, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0464-7966","authenticated-orcid":false,"given":"Heng","family":"Ji","sequence":"additional","affiliation":[{"name":"University of Illinois at Urbana-Champaign, Champaign, IL, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3629-2696","authenticated-orcid":false,"given":"Jiawei","family":"Han","sequence":"additional","affiliation":[{"name":"University of Illinois at Urbana-Champaign, Champaign, IL, 
USA"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/n19-4013"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1387"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W18-5446"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1410"},{"key":"ref5","article-title":"Emergent abilities of large language models","author":"Wei","year":"2022","journal-title":"Trans. Mach. Learn. Res."},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1017\/cbo9780511721649"},{"key":"ref7","first-page":"156","article-title":"Computing the shortest path: A search meets graph theory","volume-title":"Proc. 16th Annu. ACM-SIAM Symp. Discrete Algorithms","author":"Goldberg"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.14778\/2311906.2311907"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3655103.3655110"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1023\/A:1009953814988"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/276675.276685"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1162\/qss_a_00021"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1145\/3543507.3583354"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/3240323.3240369"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1018"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1609\/aimag.v29i3.2157"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00360"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/3447548.3467375"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2020.2978386"},{"key":"ref20","article-title":"Towards graph foundation models: A survey and 
beyond","author":"Liu","year":"2023"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/tkde.2024.3352100"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.68"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1810.04805"},{"key":"ref24","article-title":"RoBERTa: A robustly optimized bert pretraining approach","author":"Liu","year":"2019"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1371"},{"key":"ref26","article-title":"Language models are few-shot learners","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Brown"},{"key":"ref27","first-page":"5754","article-title":"XLNet: Generalized autoregressive pretraining for language understanding","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Yang"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1525\/9780520940420-020"},{"key":"ref29","first-page":"140:1","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"J. Mach. Learn. Res."},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.551"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.387"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.692"},{"key":"ref33","first-page":"16857","article-title":"MPNet: Masked and permuted pre-training for language understanding","volume-title":"Proc. Int. Conf. Neural Inf. Process. 
Syst.","author":"Song"},{"key":"ref34","article-title":"SimTeG: A frustratingly simple approach improves textual graph learning","author":"Duan","year":"2023"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1016\/j.lindif.2023.102274"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.353"},{"key":"ref38","first-page":"2790","article-title":"Parameter-efficient transfer learning for NLP","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Houlsby"},{"key":"ref39","article-title":"Lora: Low-rank adaptation of large language models","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Hu"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i17.29875"},{"key":"ref41","article-title":"GraphLLM: Boosting graph reasoning ability of large language model","author":"Chai","year":"2023"},{"key":"ref42","article-title":"Finetuned language models are zero-shot learners","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Wei"},{"key":"ref43","article-title":"Multitask prompted training enables zero-shot task generalization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Sanh"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1145\/3626772.3657775"},{"key":"ref45","article-title":"Natural language is all a graph needs","author":"Ye","year":"2023"},{"key":"ref46","doi-asserted-by":"crossref","DOI":"10.1101\/2023.05.30.542904","article-title":"GIMLET: A unified graph-text model for instruction-based molecule zero-shot learning","author":"Zhao","year":"2023"},{"key":"ref47","first-page":"24824","article-title":"Chain-of-thought prompting elicits reasoning in large language models","volume-title":"Proc. Int. Conf. Neural Inf. Process. 
Syst.","author":"Wei"},{"key":"ref48","article-title":"Tree of thoughts: Deliberate problem solving with large language models","author":"Yao","year":"2023"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i16.29720"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.207"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.802"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.textgraphs-1.2"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1145\/3626772.3657978"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.181"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1145\/3580305.3599921"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1145\/3442381.3450114"},{"key":"ref57","article-title":"Node feature extraction by self-supervised multi-scale neighborhood prediction","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Chien"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1145\/3485447.3512174"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/icdmw60847.2023.00142"},{"key":"ref60","first-page":"13308","article-title":"WalkLM: A uniform language model fine-tuning framework for attributed graph embedding","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Tan"},{"key":"ref61","article-title":"Learning on large-scale text-attributed graphs via variational inference","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Zhao"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1145\/3539618.3591641"},{"key":"ref63","article-title":"Label-free node classification on graphs with large language models (LLMS)","author":"Chen","year":"2023"},{"key":"ref64","article-title":"GraphText: Graph reasoning in text space","author":"Zhao","year":"2023"},{"key":"ref65","article-title":"GNN-LM: Language modeling based on global contexts via GNN","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Meng"},{"key":"ref66","article-title":"GreaseLM: Graph reasoning enhanced language models for question answering","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Zhang"},{"key":"ref67","article-title":"Efficient and effective training of language and graph neural network models","volume-title":"Proc. AAAI Conf. Artif. Intell.","author":"Ioannidis"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-43418-1_10"},{"key":"ref69","article-title":"Explanations as features: LLM-based features for text-attributed graphs","author":"He","year":"2023"},{"key":"ref70","article-title":"Empower text-attributed graphs learning with large language models (LLMs)","author":"Yu","year":"2023"},{"key":"ref71","first-page":"28798","article-title":"GraphFormers: GNN-nested transformers for representation learning on textual graph","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Yang"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1145\/3580305.3599376"},{"key":"ref73","article-title":"Edgeformers: Graph-empowered transformers for representation learning on textual-edge networks","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Jin"},{"key":"ref74","article-title":"Learning multiplex embeddings on text-rich networks with one text encoder","author":"Jin","year":"2023"},{"key":"ref75","article-title":"Disentangled representation learning with large language models for text-attributed graphs","author":"Qin","year":"2023"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1145\/3442381.3449842"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1145\/3404835.3462926"},{"key":"ref78","first-page":"7267","article-title":"Fast multi-resolution transformer fine-tuning for extreme multi-label text classification","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Zhang"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1145\/3580305.3599833"},{"key":"ref80","first-page":"37309","article-title":"Deep bidirectional language-knowledge graph pretraining","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Yasunaga"},{"key":"ref81","article-title":"Can LLMs effectively leverage graph structural information: When and why","author":"Huang","year":"2023"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.830"},{"key":"ref83","article-title":"Semi-supervised classification with graph convolutional networks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Kipf"},{"key":"ref84","first-page":"1025","article-title":"Inductive representation learning on large graphs","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Hamilton"},{"key":"ref85","article-title":"Graph attention networks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Veli\u010dkovi\u0107"},{"key":"ref86","article-title":"Graph-less neural networks: Teaching old MLPs new tricks via distillation","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Zhang"},{"key":"ref87","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3403076"},{"key":"ref88","first-page":"462","article-title":"Generating training data with language models: Towards zero-shot language understanding","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Meng"},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.14778\/3402707.3402736"},{"key":"ref90","article-title":"Visual instruction tuning","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Liu"},{"key":"ref91","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5985"},{"key":"ref92","article-title":"Attention is all you need","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Vaswani"},{"key":"ref93","doi-asserted-by":"publisher","DOI":"10.1145\/511446.511513"},{"key":"ref94","article-title":"Representation learning with contrastive predictive coding","author":"Oord","year":"2018"},{"key":"ref95","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. 
Learn.","author":"Radford"},{"key":"ref96","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.4"},{"key":"ref97","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-short.48"},{"key":"ref98","article-title":"Legal networks: The promises and challenges of legal network analysis","author":"Whalen","year":"2016","journal-title":"Michigan State Law Rev."},{"key":"ref99","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P16-1166"},{"key":"ref100","doi-asserted-by":"publisher","DOI":"10.2139\/ssrn.4583531"},{"key":"ref101","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.ecnlp-1.6"},{"key":"ref102","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2018.2890388"},{"key":"ref103","doi-asserted-by":"publisher","DOI":"10.1109\/IETC47856.2020.9249211"},{"key":"ref104","article-title":"Author identification on the large scale","volume-title":"Proc. Meeting Classification Soc. North Amer.","author":"Madigan"},{"key":"ref105","doi-asserted-by":"publisher","DOI":"10.1145\/3397271.3401063"},{"key":"ref106","doi-asserted-by":"publisher","DOI":"10.1145\/3397271.3401198"},{"key":"ref107","doi-asserted-by":"publisher","DOI":"10.1145\/3308558.3313644"},{"key":"ref108","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2020.2977407"},{"key":"ref109","doi-asserted-by":"publisher","DOI":"10.1145\/2858036.2858107"},{"key":"ref110","article-title":"Overlapping community detection with graph neural networks","author":"Shchur","year":"2019"},{"key":"ref111","first-page":"22199","article-title":"Large language models are zero-shot reasoners","volume":"35","author":"Kojima","year":"2022","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref112","first-page":"22199","article-title":"Large language models are zero-shot reasoners","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"35","author":"Kojima"},{"key":"ref113","first-page":"24824","article-title":"Chain-of-thought prompting elicits reasoning in large language models","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Wei"},{"issue":"8","key":"ref114","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI Blog"},{"key":"ref115","article-title":"ALBERT: A lite bert for self-supervised learning of language representations","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Lan"},{"key":"ref116","article-title":"ELECTRA: Pre-training text encoders as discriminators rather than generators","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Clark"},{"key":"ref117","article-title":"Sparks of artificial general intelligence: Early experiments with GPT-4","author":"Bubeck","year":"2023"},{"key":"ref118","article-title":"Llama 2: Open foundation and fine-tuned chat models","author":"Touvron","year":"2023"},{"key":"ref119","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-55560-2_5"},{"key":"ref120","first-page":"23716","article-title":"Flamingo: A visual language model for few-shot learning","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Alayrac"},{"key":"ref121","first-page":"595","article-title":"Text2Mol: Cross-modal molecule retrieval with natural language queries","volume-title":"Proc. Conf. Empir. Methods Natural Lang. 
Process.","author":"Edwards"},{"key":"ref122","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.26"},{"key":"ref123","article-title":"Can language models solve graph problems in natural language?","author":"Wang","year":"2023"},{"key":"ref124","article-title":"Evaluating large language models on graphs: Performance insights and comparative analysis","author":"Liu","year":"2023"},{"key":"ref125","article-title":"GPT4Graph: Can large language models understand graph structured data? an empirical evaluation and benchmarking","author":"Guo","year":"2023"},{"key":"ref126","article-title":"Graph-ToolFormer: To empower LLMs with graph reasoning ability via prompt augmented by ChatGPT","author":"Zhang","year":"2023"},{"key":"ref127","article-title":"LLM4DyG: Can large language models solve problems on dynamic graphs?","author":"Zhang","year":"2023"},{"key":"ref128","article-title":"Reasoning on graphs: Faithful and interpretable large language model reasoning","author":"Luo","year":"2023"},{"key":"ref129","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.574"},{"key":"ref130","article-title":"Talk like a graph: Encoding graphs for large language models","author":"Fatemi","year":"2023"},{"key":"ref131","article-title":"Think-on-graph: Deep and responsible reasoning of large language model with knowledge graph","author":"Sun","year":"2023"},{"key":"ref132","doi-asserted-by":"publisher","DOI":"10.1145\/242224.242246"},{"key":"ref133","article-title":"Airline scheduling with max flow algorithm","volume-title":"Proc. Int. Joint Conf. Artif. Intell.","author":"Iqbal"},{"key":"ref134","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2021.115894"},{"key":"ref135","first-page":"156","article-title":"Graph-based knowledge tracing: Modeling student proficiency using graph neural network","volume-title":"Proc. IEEE\/WIC\/ACM Int. Conf. 
Web Intell.","author":"Nakagawa"},{"key":"ref136","doi-asserted-by":"publisher","DOI":"10.1145\/3340531.3412733"},{"key":"ref137","article-title":"Artificial intelligence for science in quantum, atomistic, and continuum systems","author":"Zhang","year":"2023"},{"key":"ref138","article-title":"A survey on oversmoothing in graph neural networks","author":"Rusch","year":"2023"},{"key":"ref139","article-title":"Understanding over-squashing and bottlenecks on graphs via curvature","author":"Topping","year":"2021"},{"key":"ref140","first-page":"28877","article-title":"Do transformers really perform badly for graph representation?","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Ying"},{"key":"ref141","first-page":"14501","article-title":"Recipe for a general, powerful, scalable graph transformer","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Ramp\u00e1\u0161ek"},{"key":"ref142","doi-asserted-by":"publisher","DOI":"10.1145\/3580305.3599497"},{"key":"ref143","first-page":"27387","article-title":"Nodeformer: A scalable graph structure learning transformer for node classification","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Wu"},{"issue":"4","key":"ref144","first-page":"1","article-title":"Graph rationalization with environment-based augmentations","volume":"18","author":"Liu","year":"2022","journal-title":"ACM Trans. Knowl. Discov. 
Data"},{"key":"ref145","article-title":"ChemCrow: Augmenting large-language models with chemistry tools","author":"Bran","year":"2023"},{"key":"ref146","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-89689-0_33"},{"key":"ref147","doi-asserted-by":"publisher","DOI":"10.1021\/ci00057a005"},{"key":"ref148","doi-asserted-by":"publisher","DOI":"10.1186\/1758-2946-5-7"},{"key":"ref149","doi-asserted-by":"crossref","DOI":"10.26434\/chemrxiv.7097960.v1","article-title":"DeepSMILES: An adaptation of SMILES for use in machine-learning of chemical structures","author":"O\u2019Boyle","year":"2018"},{"key":"ref150","doi-asserted-by":"publisher","DOI":"10.1088\/2632-2153\/aba947"},{"key":"ref151","article-title":"SMILES enumeration as data augmentation for neural network modeling of molecules","author":"Bjerrum","year":"2017"},{"key":"ref152","doi-asserted-by":"publisher","DOI":"10.1186\/s13321-019-0393-0"},{"key":"ref153","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-30493-5_79"},{"key":"ref154","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D18-2012"},{"key":"ref155","doi-asserted-by":"publisher","DOI":"10.1088\/2632-2153\/ac3ffb"},{"key":"ref156","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.366"},{"key":"ref157","doi-asserted-by":"publisher","DOI":"10.1016\/j.compbiomed.2024.108073"},{"key":"ref158","doi-asserted-by":"publisher","DOI":"10.1021\/acscatal.3c04956"},{"key":"ref159","article-title":"Mol-instructions: A large-scale biomolecular instruction dataset for large language models","author":"Fang","year":"2023"},{"key":"ref160","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i10.28948"},{"key":"ref161","article-title":"MolFM: A multimodal molecular foundation model","author":"Luo","year":"2023"},{"key":"ref162","article-title":"Can large language models empower molecular property 
prediction?","author":"Qian","year":"2023"},{"key":"ref163","doi-asserted-by":"publisher","DOI":"10.1038\/s42256-023-00639-z"},{"key":"ref164","doi-asserted-by":"publisher","DOI":"10.1109\/tkde.2024.3393356"},{"key":"ref165","doi-asserted-by":"publisher","DOI":"10.1093\/bioinformatics\/btae534"},{"key":"ref166","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.966"},{"key":"ref167","article-title":"What indeed can GPT models do in chemistry? A comprehensive benchmark on eight tasks","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Guo"},{"key":"ref168","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-short.138"},{"key":"ref169","first-page":"30458","article-title":"Enhancing activity prediction models in drug discovery with the ability to understand human language","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Seidl"},{"key":"ref170","first-page":"6140","article-title":"Unifying molecular and textual representations via multi-task language modelling","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Christofidellis"},{"key":"ref171","doi-asserted-by":"publisher","DOI":"10.1038\/s42256-023-00759-6"},{"key":"ref172","article-title":"Extracting molecular properties from natural language with multimodal contrastive learning","volume-title":"Proc. Int. Conf. Mach. Learn. Workshop Comput. 
Biol.","author":"Lacombe"},{"key":"ref173","article-title":"A molecular multimodal foundation model associating molecule graphs with natural language","author":"Su","year":"2022"},{"key":"ref174","doi-asserted-by":"publisher","DOI":"10.1038\/s41467-022-28494-3"},{"key":"ref175","doi-asserted-by":"publisher","DOI":"10.1021\/acs.jcim.2c00626"},{"key":"ref176","doi-asserted-by":"publisher","DOI":"10.1021\/acs.jcim.1c00600"},{"key":"ref177","article-title":"Galactica: A large language model for science","author":"Taylor","year":"2022"},{"key":"ref178","doi-asserted-by":"publisher","DOI":"10.1145\/3307339.3342186"},{"key":"ref179","doi-asserted-by":"publisher","DOI":"10.1093\/bioinformatics\/btz682"},{"key":"ref180","doi-asserted-by":"publisher","DOI":"10.1021\/acs.jcim.0c00726"},{"key":"ref181","doi-asserted-by":"publisher","DOI":"10.1093\/nar\/gkv1031"},{"key":"ref182","doi-asserted-by":"publisher","DOI":"10.1093\/nar\/gky1033"},{"key":"ref183","doi-asserted-by":"publisher","DOI":"10.1093\/nar\/gkr777"},{"key":"ref184","doi-asserted-by":"publisher","DOI":"10.1093\/nar\/gkad1004"},{"key":"ref185","doi-asserted-by":"publisher","DOI":"10.1016\/j.yrtph.2018.11.002"},{"key":"ref186","doi-asserted-by":"publisher","DOI":"10.1016\/j.patter.2022.100588"},{"key":"ref187","first-page":"22118","article-title":"Open graph benchmark: Datasets for machine learning on graphs","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Hu"},{"key":"ref188","article-title":"How powerful are graph neural networks?","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Xu"},{"key":"ref189","doi-asserted-by":"publisher","DOI":"10.1007\/springerreference_9081"},{"key":"ref190","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3403104"},{"key":"ref191","article-title":"Data-centric learning from unlabeled graphs with diffusion model","author":"Liu","year":"2023"},{"key":"ref192","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i17.29889"},{"key":"ref193","doi-asserted-by":"publisher","DOI":"10.1145\/3340531.3411981"},{"key":"ref194","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-acl.149"},{"key":"ref195","doi-asserted-by":"publisher","DOI":"10.1038\/s41467-023-41948-6"},{"key":"ref196","doi-asserted-by":"publisher","DOI":"10.1016\/j.ddtec.2020.09.003"},{"key":"ref197","doi-asserted-by":"publisher","DOI":"10.1021\/ar500432k"},{"key":"ref198","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-023-06221-2"},{"key":"ref199","article-title":"Chemical-reaction-aware molecule representation learning","author":"Wang","year":"2021"},{"key":"ref200","doi-asserted-by":"publisher","DOI":"10.1016\/j.jbi.2023.104392"},{"key":"ref201","article-title":"Inverse molecular design with multi-conditional diffusion guidance","author":"Liu","year":"2024"},{"key":"ref202","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.422"},{"key":"ref203","article-title":"Advancing graph representation learning with large language models: A comprehensive survey of techniques","author":"Mao","year":"2024"},{"key":"ref204","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2024\/898"},{"key":"ref205","first-page":"2625","article-title":"Policy shaping: Integrating human feedback with reinforcement learning","volume-title":"Proc. Int. Conf. Neural Inf. Process. 
Syst.","author":"Griffith"},{"key":"ref206","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017"},{"key":"ref207","first-page":"53728","article-title":"Direct preference optimization: Your language model is secretly a reward model","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Rafailov"},{"key":"ref208","article-title":"A survey of large language models","author":"Zhao","year":"2023"},{"key":"ref209","article-title":"LM-infinite: Simple on-the-fly length generalization for large language models","author":"Han","year":"2023"},{"key":"ref210","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2023.127063"}],"container-title":["IEEE Transactions on Knowledge and Data Engineering"],"original-title":[],"link":[{"URL":"https:\/\/ieeexplore.ieee.org\/ielam\/69\/10750897\/10697304-aam.pdf","content-type":"application\/pdf","content-version":"am","intended-application":"syndication"},{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/69\/10750897\/10697304.pdf?arnumber=10697304","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T00:37:34Z","timestamp":1732667854000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10697304\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12]]},"references-count":210,"journal-issue":{"issue":"12"},"URL":"https:\/\/doi.org\/10.1109\/tkde.2024.3469578","relation":{},"ISSN":["1041-4347","1558-2191","2326-3865"],"issn-type":[{"value":"1041-4347","type":"print"},{"value":"1558-2191","type":"electronic"},{"value":"2326-3865","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,12]]}}}