{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,10]],"date-time":"2026-04-10T10:04:56Z","timestamp":1775815496320,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":24,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,5,13]],"date-time":"2024-05-13T00:00:00Z","timestamp":1715558400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,5,13]]},"DOI":"10.1145\/3589335.3651476","type":"proceedings-article","created":{"date-parts":[[2024,5,12]],"date-time":"2024-05-12T18:41:21Z","timestamp":1715539281000},"page":"481-484","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":19,"title":["Can we Soft Prompt LLMs for Graph Learning Tasks?"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-7809-4586","authenticated-orcid":false,"given":"Zheyuan","family":"Liu","sequence":"first","affiliation":[{"name":"University of Notre Dame, South Bend, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8281-8070","authenticated-orcid":false,"given":"Xiaoxin","family":"He","sequence":"additional","affiliation":[{"name":"National University of Singapore, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2795-6080","authenticated-orcid":false,"given":"Yijun","family":"Tian","sequence":"additional","affiliation":[{"name":"University of Notre Dame, South Bend, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3932-5956","authenticated-orcid":false,"given":"Nitesh V.","family":"Chawla","sequence":"additional","affiliation":[{"name":"University of Notre Dame, South Bend, USA"}]}],"member":"320","published-online":{"date-parts":[[2024,5,13]]},"reference":[{"key":"e_1_3_2_2_1_1","volume-title":"ConGraT: Self-Supervised Contrastive Pretraining for Joint Graph and Text Embeddings. arXiv preprint arXiv:2305.14321","author":"Brannon William","year":"2023","unstructured":"William Brannon, Suyash Fulay, Hang Jiang, Wonjune Kang, Brandon Roy, Jad Kabbara, and Deb Roy. 2023. ConGraT: Self-Supervised Contrastive Pretraining for Joint Graph and Text Embeddings. arXiv preprint arXiv:2305.14321 (2023)."},{"key":"e_1_3_2_2_2_1","volume-title":"Quantifying memorization across neural language models. arXiv preprint arXiv:2202.07646","author":"Carlini Nicholas","year":"2022","unstructured":"Nicholas Carlini, Daphne Ippolito, Matthew Jagielski, Katherine Lee, Florian Tramer, and Chiyuan Zhang. 2022. Quantifying memorization across neural language models. arXiv preprint arXiv:2202.07646 (2022)."},{"key":"e_1_3_2_2_3_1","volume-title":"Graphllm: Boosting graph reasoning ability of large language model. arXiv preprint arXiv:2310.05845","author":"Chai Ziwei","year":"2023","unstructured":"Ziwei Chai, Tianjie Zhang, Liang Wu, Kaiqiao Han, Xiaohai Hu, Xuanwen Huang, and Yang Yang. 2023. Graphllm: Boosting graph reasoning ability of large language model. arXiv preprint arXiv:2310.05845 (2023)."},{"key":"e_1_3_2_2_4_1","volume-title":"Charles Sutton, Sebastian Gehrmann, et al.","author":"Chowdhery Aakanksha","year":"2023","unstructured":"Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. 2023. 
Palm: Scaling language modeling with pathways. Journal of Machine Learning Research (2023)."},{"key":"e_1_3_2_2_5_1","volume-title":"Explanations as Features: LLM-Based Features for Text-Attributed Graphs. arXiv preprint arXiv:2305.19523","author":"He Xiaoxin","year":"2023","unstructured":"Xiaoxin He, Xavier Bresson, Thomas Laurent, and Bryan Hooi. 2023. Explanations as Features: LLM-Based Features for Text-Attributed Graphs. arXiv preprint arXiv:2305.19523 (2023)."},{"key":"e_1_3_2_2_6_1","volume-title":"G-Retriever: Retrieval-Augmented Generation for Textual Graph Understanding and Question Answering. arXiv preprint arXiv:2402.07630","author":"He Xiaoxin","year":"2024","unstructured":"Xiaoxin He, Yijun Tian, Yifei Sun, Nitesh V Chawla, Thomas Laurent, Yann LeCun, Xavier Bresson, and Bryan Hooi. 2024. G-Retriever: Retrieval-Augmented Generation for Textual Graph Understanding and Question Answering. arXiv preprint arXiv:2402.07630 (2024)."},{"key":"e_1_3_2_2_7_1","volume-title":"Lisa Anne Hendricks, Johannes Welbl, Aidan Clark, et al.","author":"Hoffmann Jordan","year":"2022","unstructured":"Jordan Hoffmann, Sebastian Borgeaud, Arthur Mensch, Elena Buchatskaya, Trevor Cai, Eliza Rutherford, Diego de Las Casas, Lisa Anne Hendricks, Johannes Welbl, Aidan Clark, et al. 2022. Training compute-optimal large language models. arXiv preprint arXiv:2203.15556 (2022)."},{"key":"e_1_3_2_2_8_1","unstructured":"Aitor Lewkowycz Anders Andreassen David Dohan Ethan Dyer Henryk Michalewski Vinay Ramasesh Ambrose Slone Cem Anil Imanol Schlag Theo Gutman-Solo et al. 2022. Solving quantitative reasoning problems with language models. In NeurIPS."},{"key":"e_1_3_2_2_9_1","unstructured":"Percy Liang Rishi Bommasani Tony Lee Dimitris Tsipras Dilara Soylu Michihiro Yasunaga Yian Zhang Deepak Narayanan Yuhuai Wu Ananya Kumar et al. 2022. Holistic evaluation of language models. arXiv preprint arXiv:2211.09110 (2022)."},{"key":"e_1_3_2_2_10_1","volume-title":"Train Your Own GNN Teacher: Graph-Aware Distillation on Textual Graphs. arXiv preprint arXiv:2304.10668","author":"Mavromatis Costas","year":"2023","unstructured":"Costas Mavromatis, Vassilis N Ioannidis, Shen Wang, Da Zheng, Soji Adeshina, Jun Ma, Han Zhao, Christos Faloutsos, and George Karypis. 2023. Train Your Own GNN Teacher: Graph-Aware Distillation on Textual Graphs. arXiv preprint arXiv:2304.10668 (2023)."},{"key":"e_1_3_2_2_11_1","volume-title":"Neighborhood contrastive learning for scientific document representations with citation embeddings. arXiv preprint arXiv:2202.06671","author":"Ostendorff Malte","year":"2022","unstructured":"Malte Ostendorff, Nils Rethmeier, Isabelle Augenstein, Bela Gipp, and Georg Rehm. 2022. Neighborhood contrastive learning for scientific document representations with citation embeddings. arXiv preprint arXiv:2202.06671 (2022)."},{"key":"e_1_3_2_2_12_1","unstructured":"Long Ouyang Jeffrey Wu Xu Jiang Diogo Almeida Carroll Wainwright Pamela Mishkin Chong Zhang Sandhini Agarwal Katarina Slama Alex Ray et al. 2022. Training language models to follow instructions with human feedback. In NeurIPS."},{"key":"e_1_3_2_2_13_1","volume-title":"Democratizing Large Language Models via Personalized Parameter-Efficient Fine-tuning. arXiv preprint arXiv:2402.04401","author":"Tan Zhaoxuan","year":"2024","unstructured":"Zhaoxuan Tan, Qingkai Zeng, Yijun Tian, Zheyuan Liu, Bing Yin, and Meng Jiang. 2024. Democratizing Large Language Models via Personalized Parameter-Efficient Fine-tuning. 
arXiv preprint arXiv:2402.04401 (2024)."},{"key":"e_1_3_2_2_14_1","volume-title":"2024 a. TinyLLM: Learning a Small Student from Multiple Large Language Models. arXiv preprint arXiv:2402.04616","author":"Tian Yijun","year":"2024","unstructured":"Yijun Tian, Yikun Han, Xiusi Chen, Wei Wang, and Nitesh V Chawla. 2024 a. TinyLLM: Learning a Small Student from Multiple Large Language Models. arXiv preprint arXiv:2402.04616 (2024)."},{"key":"e_1_3_2_2_15_1","doi-asserted-by":"crossref","unstructured":"Yijun Tian Huan Song Zichen Wang Haozhu Wang Ziqing Hu Fang Wang Nitesh V Chawla and Panpan Xu. 2024 b. Graph neural prompting with large language models. In AAAI.","DOI":"10.1609\/aaai.v38i17.29875"},{"key":"e_1_3_2_2_16_1","unstructured":"Hugo Touvron Louis Martin Kevin Stone Peter Albert Amjad Almahairi Yasmine Babaei Nikolay Bashlykov Soumya Batra Prajjwal Bhargava Shruti Bhosale et al. 2023. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288 (2023)."},{"key":"e_1_3_2_2_17_1","volume-title":"Graph attention networks. arXiv preprint arXiv:1710.10903","author":"Petar Velivc","year":"2017","unstructured":"Petar Velivc kovi\u0107, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Lio, and Yoshua Bengio. 2017. Graph attention networks. arXiv preprint arXiv:1710.10903 (2017)."},{"key":"e_1_3_2_2_18_1","doi-asserted-by":"crossref","unstructured":"Yu Wang Nedim Lipka Ryan A Rossi Alexa Siu Ruiyi Zhang and Tyler Derr. 2024. Knowledge graph prompting for multi-document question answering. In AAAI.","DOI":"10.1609\/aaai.v38i17.29889"},{"key":"e_1_3_2_2_19_1","volume-title":"Llmrec: Large language models with graph augmentation for recommendation. In WSDM. 806--815.","author":"Wei Wei","year":"2024","unstructured":"Wei Wei, Xubin Ren, Jiabin Tang, Qinyong Wang, Lixin Su, Suqi Cheng, Junfeng Wang, Dawei Yin, and Chao Huang. 2024. Llmrec: Large language models with graph augmentation for recommendation. In WSDM. 806--815."},{"key":"e_1_3_2_2_20_1","volume-title":"Augmenting Low-Resource Text Classification with Graph-Grounded Pre-training and Prompting. arXiv preprint arXiv:2305.03324","author":"Wen Zhihao","year":"2023","unstructured":"Zhihao Wen and Yuan Fang. 2023. Augmenting Low-Resource Text Classification with Graph-Grounded Pre-training and Prompting. arXiv preprint arXiv:2305.03324 (2023)."},{"key":"e_1_3_2_2_21_1","unstructured":"Junhan Yang Zheng Liu Shitao Xiao Chaozhuo Li Defu Lian Sanjay Agrawal Amit Singh Guangzhong Sun and Xing Xie. 2021. GraphFormers: GNN-nested transformers for representation learning on textual graph. In NeurIPS."},{"key":"e_1_3_2_2_22_1","volume-title":"Empower text-attributed graphs learning with large language models (llms). arXiv preprint arXiv:2310.09872","author":"Yu Jianxiang","year":"2023","unstructured":"Jianxiang Yu, Yuxiang Ren, Chenghua Gong, Jiaqi Tan, Xiang Li, and Xuecang Zhang. 2023. Empower text-attributed graphs learning with large language models (llms). arXiv preprint arXiv:2310.09872 (2023)."},{"key":"e_1_3_2_2_23_1","doi-asserted-by":"crossref","unstructured":"Yu Zhang Zhihong Shen Chieh-Han Wu Boya Xie Junheng Hao Ye-Yi Wang Kuansan Wang and Jiawei Han. 2022. Metadata-induced contrastive learning for zero-shot multi-label text classification. In WWW.","DOI":"10.1145\/3485447.3512174"},{"key":"e_1_3_2_2_24_1","volume-title":"Learning on large-scale text-attributed graphs via variational inference. 
arXiv preprint arXiv:2210.14709","author":"Zhao Jianan","year":"2022","unstructured":"Jianan Zhao, Meng Qu, Chaozhuo Li, Hao Yan, Qian Liu, Rui Li, Xing Xie, and Jian Tang. 2022. Learning on large-scale text-attributed graphs via variational inference. arXiv preprint arXiv:2210.14709 (2022)."}],"event":{"name":"WWW '24: The ACM Web Conference 2024","location":"Singapore Singapore","acronym":"WWW '24","sponsor":["SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web"]},"container-title":["Companion Proceedings of the ACM Web Conference 2024"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3589335.3651476","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3589335.3651476","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T00:31:16Z","timestamp":1755822676000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3589335.3651476"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,5,13]]},"references-count":24,"alternative-id":["10.1145\/3589335.3651476","10.1145\/3589335"],"URL":"https:\/\/doi.org\/10.1145\/3589335.3651476","relation":{},"subject":[],"published":{"date-parts":[[2024,5,13]]},"assertion":[{"value":"2024-05-13","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}