{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,9]],"date-time":"2025-12-09T19:25:24Z","timestamp":1765308324480,"version":"3.46.0"},"publisher-location":"New York, NY, USA","reference-count":49,"publisher":"ACM","content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,10,27]]},"DOI":"10.1145\/3746027.3755086","type":"proceedings-article","created":{"date-parts":[[2025,10,25]],"date-time":"2025-10-25T05:50:47Z","timestamp":1761371447000},"page":"1258-1267","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["BridgeGLM: Bridging Graph and Language Spaces for Domain Generalization"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-9530-2858","authenticated-orcid":false,"given":"Jiaxing","family":"Qi","sequence":"first","affiliation":[{"name":"School of Computer Science and Engineering, Beihang University, Beijing, China and Beijing Digital Native Digital City Research Center, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-5136-9083","authenticated-orcid":false,"given":"Yifan","family":"Xu","sequence":"additional","affiliation":[{"name":"School of Computer Science and Engineering, Beihang University, Beijing, China and Beijing Digital Native Digital City Research Center, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0054-8896","authenticated-orcid":false,"given":"Zhifei","family":"Yang","sequence":"additional","affiliation":[{"name":"School of Computer Science, Peking University, Beijing, China and Beijing Digital Native Digital City Research Center, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-8481-2906","authenticated-orcid":false,"given":"Ruifei","family":"Ma","sequence":"additional","affiliation":[{"name":"Beijing Digital Native Digital City Research Center, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-3780-7453","authenticated-orcid":false,"given":"Chao","family":"Zhang","sequence":"additional","affiliation":[{"name":"Beijing Digital Native Digital City Research Center, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-3768-8440","authenticated-orcid":false,"given":"Kuifei","family":"Yu","sequence":"additional","affiliation":[{"name":"Beijing Digital Native Digital City Research Center, Beijing, China"}]}],"member":"320","published-online":{"date-parts":[[2025,10,27]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Graphllm: Boosting graph reasoning ability of large language model. arXiv preprint arXiv:2310.05845","author":"Chai Ziwei","year":"2023","unstructured":"Ziwei Chai, Tianjie Zhang, Liang Wu, Kaiqiao Han, Xiaohai Hu, Xuanwen Huang, and Yang Yang. 2023. Graphllm: Boosting graph reasoning ability of large language model. arXiv preprint arXiv:2310.05845 (2023)."},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1145\/3641289"},{"volume-title":"The Twelfth International Conference on Learning Representations.","author":"Chen Zhikai","key":"e_1_3_2_1_3_1","unstructured":"Zhikai Chen, Haitao Mao, Hongzhi Wen, Haoyu Han, Wei Jin, Haiyang Zhang, Hui Liu, and Jiliang Tang. [n.d.]. Label-free Node Classification on Graphs with Large Language Models (LLMs). In The Twelfth International Conference on Learning Representations."},{"key":"e_1_3_2_1_4_1","volume-title":"Talk like a graph: Encoding graphs for large language models. arXiv preprint arXiv:2310.04560","author":"Fatemi Bahare","year":"2023","unstructured":"Bahare Fatemi, Jonathan Halcrow, and Bryan Perozzi. 2023. Talk like a graph: Encoding graphs for large language models. arXiv preprint arXiv:2310.04560 (2023)."},{"key":"e_1_3_2_1_5_1","volume-title":"international conference on machine learning. PMLR","author":"Gao Hongyang","year":"2019","unstructured":"Hongyang Gao and Shuiwang Ji. 2019. Graph u-nets. In international conference on machine learning. PMLR, 2083-2092."},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.1145\/2939672.2939754"},{"key":"e_1_3_2_1_7_1","volume-title":"Gpt4graph: Can large language models understand graph structured data? an empirical evaluation and benchmarking. arXiv preprint arXiv:2305.15066","author":"Guo Jiayan","year":"2023","unstructured":"Jiayan Guo, Lun Du, Hengyu Liu, Mengyu Zhou, Xinyi He, and Shi Han. 2023. Gpt4graph: Can large language models understand graph structured data? an empirical evaluation and benchmarking. arXiv preprint arXiv:2305.15066 (2023)."},{"key":"e_1_3_2_1_8_1","volume-title":"Inductive representation learning on large graphs. Advances in neural information processing systems","author":"Hamilton Will","year":"2017","unstructured":"Will Hamilton, Zhitao Ying, and Jure Leskovec. 2017. Inductive representation learning on large graphs. Advances in neural information processing systems, Vol. 30 (2017)."},{"volume-title":"Harnessing Explanations: LLM-to-LM Interpreter for Enhanced Text-Attributed Graph Representation Learning. In The Twelfth International Conference on Learning Representations.","author":"He Xiaoxin","key":"e_1_3_2_1_9_1","unstructured":"Xiaoxin He, Xavier Bresson, Thomas Laurent, Adam Perold, Yann LeCun, and Bryan Hooi. [n.d.]. Harnessing Explanations: LLM-to-LM Interpreter for Enhanced Text-Attributed Graph Representation Learning. In The Twelfth International Conference on Learning Representations."},{"key":"e_1_3_2_1_10_1","volume-title":"Beyond Text: A Deep Dive into Large Language Models' Ability on Understanding Graph Data. arXiv preprint arXiv:2310.04944","author":"Hu Yuntong","year":"2023","unstructured":"Yuntong Hu, Zheng Zhang, and Liang Zhao. 2023. Beyond Text: A Deep Dive into Large Language Models' Ability on Understanding Graph Data. arXiv preprint arXiv:2310.04944 (2023)."},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-emnlp.75"},{"key":"e_1_3_2_1_12_1","volume-title":"Can llms effectively leverage graph structural information: when and why. arXiv preprint arXiv:2309.16595","author":"Huang Jin","year":"2023","unstructured":"Jin Huang, Xingjian Zhang, Qiaozhu Mei, and Jiaqi Ma. 2023. Can llms effectively leverage graph structural information: when and why. arXiv preprint arXiv:2309.16595 (2023)."},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1145\/3589334.3645627"},{"key":"e_1_3_2_1_14_1","volume-title":"Kolla Bhanu Prakash, and GR Kanagachidambaresan","author":"Imambi Sagar","year":"2021","unstructured":"Sagar Imambi, Kolla Bhanu Prakash, and GR Kanagachidambaresan. 2021. PyTorch. Programming with TensorFlow: solution for edge computing applications (2021), 87-104."},{"key":"e_1_3_2_1_15_1","volume-title":"Large language models on graphs: A comprehensive survey. arXiv preprint arXiv:2312.02783","author":"Jin Bowen","year":"2023","unstructured":"Bowen Jin, Gang Liu, Chi Han, Meng Jiang, Heng Ji, and Jiawei Han. 2023a. Large language models on graphs: A comprehensive survey. arXiv preprint arXiv:2312.02783 (2023)."},{"key":"e_1_3_2_1_16_1","volume-title":"Patton: Language model pretraining on text-rich networks. arXiv preprint arXiv:2305.12268","author":"Jin Bowen","year":"2023","unstructured":"Bowen Jin, Wentao Zhang, Yu Zhang, Yu Meng, Xinyang Zhang, Qi Zhu, and Jiawei Han. 2023c. Patton: Language model pretraining on text-rich networks. arXiv preprint arXiv:2305.12268 (2023)."},{"key":"e_1_3_2_1_17_1","volume-title":"Edgeformers: Graph-empowered transformers for representation learning on textual-edge networks. arXiv preprint arXiv:2302.11050","author":"Jin Bowen","year":"2023","unstructured":"Bowen Jin, Yu Zhang, Yu Meng, and Jiawei Han. 2023b. Edgeformers: Graph-empowered transformers for representation learning on textual-edge networks. arXiv preprint arXiv:2302.11050 (2023)."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1145\/3580305.3599376"},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1148\/ryai.230364"},{"key":"e_1_3_2_1_20_1","unstructured":"Zhuofeng Li Zixing Gou Xiangnan Zhang Zhongyuan Liu Sirui Li Yuntong Hu Chen Ling Zheng Zhang and Liang Zhao. 2024. TEG-DB: A Comprehensive Dataset and Benchmark of Textual-Edge Graphs. arXiv:2406.10310"},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3680924"},{"key":"e_1_3_2_1_22_1","unstructured":"Zhiyuan Liu Sihang Li Yanchen Luo Hao Fei Yixin Cao Kenji Kawaguchi Xiang Wang and Tat-Seng Chua. 2023. MolCA: Molecular graph-language modeling with cross-modal projector and uni-modal adapter. arXiv preprint arXiv:2310.12798 (2023)."},{"key":"e_1_3_2_1_23_1","first-page":"462","article-title":"Generating training data with language models: Towards zero-shot language understanding","volume":"35","author":"Meng Yu","year":"2022","unstructured":"Yu Meng, Jiaxin Huang, Yu Zhang, and Jiawei Han. 2022. Generating training data with language models: Towards zero-shot language understanding. Advances in Neural Information Processing Systems, Vol. 35 (2022), 462-477.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_24_1","unstructured":"Bryan Perozzi Bahare Fatemi Dustin Zelle Anton Tsitsulin Mehran Kazemi Rami Al-Rfou and Jonathan Halcrow. 2024a. Let Your Graph Do the Talking: Encoding Structured Data for LLMs. arXiv:2402.05862 [cs.LG] https:\/\/arxiv.org\/abs\/2402.05862"},{"key":"e_1_3_2_1_25_1","volume-title":"Let your graph do the talking: Encoding structured data for llms. arXiv preprint arXiv:2402.05862","author":"Perozzi Bryan","year":"2024","unstructured":"Bryan Perozzi, Bahare Fatemi, Dustin Zelle, Anton Tsitsulin, Mehran Kazemi, Rami Al-Rfou, and Jonathan Halcrow. 2024b. Let your graph do the talking: Encoding structured data for llms. arXiv preprint arXiv:2402.05862 (2024)."},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","DOI":"10.1145\/3626772.3657775"},{"key":"e_1_3_2_1_27_1","volume-title":"Kabilan Elangovan, Laura Gutierrez, Ting Fang Tan, and Daniel Shu Wei Ting.","author":"Thirunavukarasu Arun James","year":"2023","unstructured":"Arun James Thirunavukarasu, Darren Shu Jeng Ting, Kabilan Elangovan, Laura Gutierrez, Ting Fang Tan, and Daniel Shu Wei Ting. 2023. Large language models in medicine. Nature medicine, Vol. 29, 8 (2023), 1930-1940."},{"key":"e_1_3_2_1_28_1","volume-title":"Graph attention networks. arXiv preprint arXiv:1710.10903","author":"Veli\u010dkovi\u0107 Petar","year":"2017","unstructured":"Petar Veli\u010dkovi\u0107, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Lio, and Yoshua Bengio. 2017. Graph attention networks. arXiv preprint arXiv:1710.10903 (2017)."},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1145\/3626772.3657731"},{"key":"e_1_3_2_1_30_1","volume-title":"InstructGraph: Boosting Large Language Models via Graph-centric Instruction Tuning and Preference Alignment. arXiv preprint arXiv:2402.08785","author":"Wang Jianing","year":"2024","unstructured":"Jianing Wang, Junda Wu, Yupeng Hou, Yao Liu, Ming Gao, and Julian McAuley. 2024a. InstructGraph: Boosting Large Language Models via Graph-centric Instruction Tuning and Preference Alignment. arXiv preprint arXiv:2402.08785 (2024)."},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1162\/qss_a_00021"},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3680682"},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1145\/3292500.3330950"},{"key":"e_1_3_2_1_34_1","volume-title":"Opengraph: Towards open graph foundation models. arXiv preprint arXiv:2403.01121","author":"Xia Lianghao","year":"2024","unstructured":"Lianghao Xia, Ben Kao, and Chao Huang. 2024. Opengraph: Towards open graph foundation models. arXiv preprint arXiv:2403.01121 (2024)."},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1007\/s00138-021-01251-0"},{"key":"e_1_3_2_1_36_1","volume-title":"Revisiting over-smoothing in deep GCNs. arXiv preprint arXiv:2003.13663","author":"Yang Chaoqi","year":"2020","unstructured":"Chaoqi Yang, Ruijie Wang, Shuochao Yao, Shengzhong Liu, and Tarek Abdelzaher. 2020. Revisiting over-smoothing in deep GCNs. arXiv preprint arXiv:2003.13663 (2020)."},{"key":"e_1_3_2_1_37_1","first-page":"28798","article-title":"Graphformers: Gnn-nested transformers for representation learning on textual graph","volume":"34","author":"Yang Junhan","year":"2021","unstructured":"Junhan Yang, Zheng Liu, Shitao Xiao, Chaozhuo Li, Defu Lian, Sanjay Agrawal, Amit Singh, Guangzhong Sun, and Xing Xie. 2021. Graphformers: Gnn-nested transformers for representation learning on textual graph. Advances in Neural Information Processing Systems, Vol. 34 (2021), 28798-28810.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_38_1","volume-title":"International conference on machine learning. PMLR, 40-48","author":"Yang Zhilin","year":"2016","unstructured":"Zhilin Yang, William Cohen, and Ruslan Salakhudinov. 2016. Revisiting semi-supervised learning with graph embeddings. In International conference on machine learning. PMLR, 40-48."},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v39i9.33017"},{"key":"e_1_3_2_1_40_1","volume-title":"Findings of the Association for Computational Linguistics: EACL 2024","author":"Ye Ruosong","year":"2024","unstructured":"Ruosong Ye, Caiqi Zhang, Runhui Wang, Shuyuan Xu, and Yongfeng Zhang. 2024. Language is all a graph needs. In Findings of the Association for Computational Linguistics: EACL 2024. 1955-1973."},{"key":"e_1_3_2_1_41_1","volume-title":"Leveraging Large Language Models for Node Generation in Few-Shot Learning on Text-Attributed Graphs. arXiv e-prints","author":"Yu Jianxiang","year":"2023","unstructured":"Jianxiang Yu, Yuxiang Ren, Chenghua Gong, Jiaqi Tan, Xiang Li, and Xuecang Zhang. 2023. Leveraging Large Language Models for Node Generation in Few-Shot Learning on Text-Attributed Graphs. arXiv e-prints (2023), arXiv-2310."},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3680865"},{"key":"e_1_3_2_1_43_1","doi-asserted-by":"publisher","DOI":"10.1186\/s40649-019-0069-y"},{"key":"e_1_3_2_1_44_1","doi-asserted-by":"publisher","DOI":"10.1145\/3442381.3450114"},{"key":"e_1_3_2_1_45_1","unstructured":"Jianan Zhao Hesham Mostafa Mikhail Galkin Michael Bronstein Zhaocheng Zhu and Jian Tang. 2024. GraphAny: A Foundation Model for Node Classification on Any Graph. (2024). arXiv:2405.20445 [cs.LG]"},{"key":"e_1_3_2_1_46_1","volume-title":"Learning on large-scale text-attributed graphs via variational inference. arXiv preprint arXiv:2210.14709","author":"Zhao Jianan","year":"2022","unstructured":"Jianan Zhao, Meng Qu, Chaozhuo Li, Hao Yan, Qian Liu, Rui Li, Xing Xie, and Jian Tang. 2022. Learning on large-scale text-attributed graphs via variational inference. arXiv preprint arXiv:2210.14709 (2022)."},{"key":"e_1_3_2_1_47_1","volume-title":"Graphtext: Graph reasoning in text space. arXiv preprint arXiv:2310.01089","author":"Zhao Jianan","year":"2023","unstructured":"Jianan Zhao, Le Zhuo, Yikang Shen, Meng Qu, Kai Liu, Michael Bronstein, Zhaocheng Zhu, and Jian Tang. 2023. Graphtext: Graph reasoning in text space. arXiv preprint arXiv:2310.01089 (2023)."},{"key":"e_1_3_2_1_48_1","volume-title":"Each Graph is a New Language: Graph Learning with LLMs. arXiv e-prints","author":"Zhou Huachi","year":"2025","unstructured":"Huachi Zhou, Jiahe Du, Chuang Zhou, Chang Yang, Yilin Xiao, Yuxuan Xie, and Xiao Huang. 2025. Each Graph is a New Language: Graph Learning with LLMs. arXiv e-prints (2025), arXiv-2501."},{"key":"e_1_3_2_1_49_1","doi-asserted-by":"publisher","DOI":"10.1145\/3442381.3449842"}],"event":{"name":"MM '25: The 33rd ACM International Conference on Multimedia","sponsor":["SIGMM ACM Special Interest Group on Multimedia"],"location":"Dublin Ireland","acronym":"MM '25"},"container-title":["Proceedings of the 33rd ACM International Conference on Multimedia"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3746027.3755086","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,9]],"date-time":"2025-12-09T19:20:29Z","timestamp":1765308029000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3746027.3755086"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,27]]},"references-count":49,"alternative-id":["10.1145\/3746027.3755086","10.1145\/3746027"],"URL":"https:\/\/doi.org\/10.1145\/3746027.3755086","relation":{},"subject":[],"published":{"date-parts":[[2025,10,27]]},"assertion":[{"value":"2025-10-27","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}