{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:33:25Z","timestamp":1763192005511,"version":"3.45.0"},"reference-count":41,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/ijcnn64981.2025.11229162","type":"proceedings-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T18:46:15Z","timestamp":1763145975000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["Kolmogorov-Arnold Network-Enhanced Cross-Modal Alignment for Textual Attribute Graph Reasoning with Large Language Models"],"prefix":"10.1109","author":[{"given":"Qihao","family":"Huang","sequence":"first","affiliation":[{"name":"Southeast University,Nanjing,China"}]},{"given":"Chen","family":"Jin","sequence":"additional","affiliation":[{"name":"Zhejiang Scientific Research Institute of Transport,Hangzhou,China"}]},{"given":"Shuke","family":"He","sequence":"additional","affiliation":[{"name":"Zhejiang Scientific Research Institute of Transport,Hangzhou,China"}]},{"given":"Lisheng","family":"Shu","sequence":"additional","affiliation":[{"name":"Zhejiang Scientific Research Institute of Transport,Hangzhou,China"}]},{"given":"Yikang","family":"Long","sequence":"additional","affiliation":[{"name":"Zhejiang Jiande Institute of General Aviation,Hangzhou,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/s11023-020-09548-1"},{"article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","year":"2018","author":"Devlin","key":"ref2"},{"article-title":"The llama 3 herd of models","year":"2024","author":"Dubey","key":"ref3"},{"article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","year":"2021","author":"Dosovitskiy","key":"ref4"},{"article-title":"Instruction mining: Instruction data selection for tuning large language models","year":"2024","author":"Cao","key":"ref5"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/3655103.3655110"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1145\/3637528.3671460"},{"key":"ref8","first-page":"5850","article-title":"Gimlet: A unified graph-text model for instruction-based molecule zero-shot learning","volume":"36","author":"Zhao","year":"2023","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3626772.3657775"},{"article-title":"Semi-supervised classification with graph convolutional networks","year":"2016","author":"Kipf","key":"ref10"},{"article-title":"How powerful are graph neural networks?","year":"2018","author":"Xu","key":"ref11"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.966"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i17.29875"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN55064.2022.9892249"},{"article-title":"Llms as zero-shot graph learners: Alignment of gnn representations with llm token embeddings","year":"2024","author":"Wang","key":"ref15"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/s10618-023-00982-0"},{"article-title":"Prompt-based node feature extractor for few-shot learning on text-attributed graphs","year":"2023","author":"Huang","key":"ref17"},{"article-title":"Harnessing explanations: Llm-to-lm interpreter for enhanced text-attributed graph representation learning","year":"2023","author":"He","key":"ref18"},{"article-title":"G-retriever: Retrieval-augmented generation for textual graph understanding and question answering","year":"2024","author":"He","key":"ref19"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1145\/3340531.3411924"},{"key":"ref21","first-page":"28877","article-title":"Do transformers really perform badly for graph representation?","volume":"34","author":"Ying","year":"2021","journal-title":"Advances in neural information processing systems"},{"article-title":"Kan: Kolmogorov-arnold networks","year":"2024","author":"Liu","key":"ref22"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2020.2995074"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-01588-5"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TBDATA.2016.2599923"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1145\/3589335.3641255"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1145\/3404835.3462809"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-91452-7_7"},{"key":"ref29","first-page":"5812","article-title":"Graph contrastive learning with augmentations","volume":"33","author":"You","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1145\/3580305.3599833"},{"key":"ref31","article-title":"Walklm: A uniform language model fine-tuning framework for attributed graph embedding","volume":"36","author":"Tan","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"article-title":"Efficient large language models fine-tuning on graphs","year":"2023","author":"Xue","key":"ref32"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/MIS.2024.3378921"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1145\/3512467"},{"issue":"20","key":"ref35","first-page":"10","article-title":"Graph attention networks","volume":"1050","author":"Velickovic","year":"2017","journal-title":"stat"},{"article-title":"Chebyshev polynomial-based kolmogorov-arnold networks: An efficient architecture for nonlinear function approximation","year":"2024","author":"SS","key":"ref36"},{"key":"ref37","first-page":"22199","article-title":"Large language models are zero-shot reasoners","volume":"35","author":"Kojima","year":"2022","journal-title":"Advances in neural information processing systems"},{"key":"ref38","article-title":"Can language models solve graph problems in natural language?","volume":"36","author":"Wang","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.matching-1.7"},{"article-title":"Let your graph do the talking: Encoding structured data for llms","year":"2024","author":"Perozzi","key":"ref40"},{"issue":"11","key":"ref41","article-title":"Visualizing data using t-sne","volume":"9","author":"Van der Maaten","year":"2008","journal-title":"Journal of machine learning research"}],"event":{"name":"2025 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2025,6,30]]},"location":"Rome, Italy","end":{"date-parts":[[2025,7,5]]}},"container-title":["2025 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11227166\/11227148\/11229162.pdf?arnumber=11229162","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:30:08Z","timestamp":1763191808000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11229162\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":41,"URL":"https:\/\/doi.org\/10.1109\/ijcnn64981.2025.11229162","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}