{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,10]],"date-time":"2026-04-10T10:07:40Z","timestamp":1775815660898,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":57,"publisher":"ACM","funder":[{"name":"Postdoctoral Fellowship Program of CPSF under Grant","award":["GZB20240358,2024M761680"],"award-info":[{"award-number":["GZB20240358,2024M761680"]}]},{"name":"New Cornerstone Science Foundation through the XPLORER PRIZE"},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62372483,62406164"],"award-info":[{"award-number":["62372483,62406164"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"NSFC for Distinguished Young Scholar","award":["62425601"],"award-info":[{"award-number":["62425601"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,8,3]]},"DOI":"10.1145\/3711896.3736993","type":"proceedings-article","created":{"date-parts":[[2025,8,3]],"date-time":"2025-08-03T20:54:17Z","timestamp":1754254457000},"page":"2222-2233","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":1,"title":["GuARD: Effective Anomaly Detection through a Text-Rich and Graph-Informed Language Model"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0005-2425-8836","authenticated-orcid":false,"given":"Yunhe","family":"Pang","sequence":"first","affiliation":[{"name":"School of Computer Science and Engineering, Sun Yat-sen University, Guangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9629-5493","authenticated-orcid":false,"given":"Bo","family":"Chen","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Technology, Tsinghua 
University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8551-1966","authenticated-orcid":false,"given":"Fanjin","family":"Zhang","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Technology, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1610-9599","authenticated-orcid":false,"given":"Yanghui","family":"Rao","sequence":"additional","affiliation":[{"name":"School of Computer Science and Engineering, Sun Yat-sen University, Guangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3247-4166","authenticated-orcid":false,"given":"Evgeny","family":"Kharlamov","sequence":"additional","affiliation":[{"name":"Bosch Center for Artificial Intelligence, Robert Bosch GmbH, Renningen, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3487-4593","authenticated-orcid":false,"given":"Jie","family":"Tang","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Technology, Tsinghua University, Beijing, China"}]}],"member":"320","published-online":{"date-parts":[[2025,8,3]]},"reference":[{"key":"e_1_3_2_2_1_1","volume-title":"Mohammad Akbari, and Ebrahim Mahdipour.","author":"Abkenar Sepideh Bazzaz","year":"2023","unstructured":"Sepideh Bazzaz Abkenar, Mostafa Haghi Kashani, Mohammad Akbari, and Ebrahim Mahdipour. 2023. Learning textual features for Twitter spam detection: A systematic literature review. Expert Systems with Applications, Vol. 228 (2023)."},{"key":"e_1_3_2_2_2_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-emnlp.74"},{"key":"e_1_3_2_2_3_1","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown Tom","year":"2020","unstructured":"Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. 
Advances in Neural Information Processing Systems, Vol. 33 (2020), 1877-1901.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_4_1","doi-asserted-by":"publisher","DOI":"10.1145\/3616855.3635843"},{"key":"e_1_3_2_2_5_1","doi-asserted-by":"publisher","DOI":"10.1145\/3580305.3599930"},{"key":"e_1_3_2_2_6_1","first-page":"8037","article-title":"GCCAD: Graph contrastive coding for anomaly detection","volume":"35","author":"Chen Bo","year":"2022","unstructured":"Bo Chen, Jing Zhang, Xiaokang Zhang, Yuxiao Dong, Jian Song, Peng Zhang, Kaibo Xu, Evgeny Kharlamov, and Jie Tang. 2022. GCCAD: Graph contrastive coding for anomaly detection. IEEE Transactions on Knowledge and Data Engineering, Vol. 35 (2022), 8037-8051.","journal-title":"IEEE Transactions on Knowledge and Data Engineering"},{"key":"e_1_3_2_2_7_1","volume-title":"Proceeding of the 12th International Conference on Learning Representations.","author":"Chen Canyu","year":"2024","unstructured":"Canyu Chen and Kai Shu. 2024. Can LLM-Generated misinformation be detected?. In Proceeding of the 12th International Conference on Learning Representations."},{"key":"e_1_3_2_2_8_1","volume-title":"Flashattention-2: Faster attention with better parallelism and work partitioning. arXiv preprint arXiv:2307.08691","author":"Dao Tri","year":"2023","unstructured":"Tri Dao. 2023. Flashattention-2: Faster attention with better parallelism and work partitioning. arXiv preprint arXiv:2307.08691 (2023)."},{"key":"e_1_3_2_2_9_1","first-page":"16344","article-title":"Flashattention: Fast and memory-efficient exact attention with io-awareness","volume":"35","author":"Dao Tri","year":"2022","unstructured":"Tri Dao, Dan Fu, Stefano Ermon, Atri Rudra, and Christopher R\u00e9. 2022. Flashattention: Fast and memory-efficient exact attention with io-awareness. Advances in Neural Information Processing Systems, Vol. 
35 (2022), 16344-16359.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_10_1","volume-title":"Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805","author":"Devlin Jacob","year":"2018","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)."},{"key":"e_1_3_2_2_11_1","volume-title":"Proceeding of the 29th ACM International Conference on Information and Knowledge Management.","author":"Dou Yingtong","unstructured":"Yingtong Dou, Zhiwei Liu, Li Sun, Yutong Deng, Hao Peng, and Philip S. Yu. 2020. Enhancing Graph Neural Network-based Fraud Detectors against Camouflaged Fraudsters. In Proceeding of the 29th ACM International Conference on Information and Knowledge Management."},{"key":"e_1_3_2_2_12_1","doi-asserted-by":"publisher","DOI":"10.1145\/3459637.3482019"},{"key":"e_1_3_2_2_13_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.196"},{"key":"e_1_3_2_2_14_1","unstructured":"Team GLM Aohan Zeng Bin Xu Bowen Wang Chenhui Zhang Da Yin Diego Rojas Guanyu Feng Hanlin Zhao Hanyu Lai et al. 2024. ChatGLM: A family of large language models from GLM-130B to GLM-4 all tools. arXiv preprint arXiv:2406.12793 (2024)."},{"key":"e_1_3_2_2_15_1","volume-title":"LogLLM: Log-based anomaly detection using large language models. arXiv preprint arXiv:2411.08561","author":"Guan Wei","year":"2024","unstructured":"Wei Guan, Jian Cao, Shiyou Qian, and Jianqi Gao. 2024. LogLLM: Log-based anomaly detection using large language models. arXiv preprint arXiv:2411.08561 (2024)."},{"key":"e_1_3_2_2_16_1","volume-title":"Sai Qian Zhang, et al","author":"Han Zeyu","year":"2024","unstructured":"Zeyu Han, Chao Gao, Jinyang Liu, Sai Qian Zhang, et al. 2024. Parameter-efficient fine-tuning for large models: A comprehensive survey. 
arXiv preprint arXiv:2403.14608 (2024)."},{"key":"e_1_3_2_2_17_1","volume-title":"Proceeding of the 9th International Conference on Learning Representations.","author":"He Pengcheng","year":"2021","unstructured":"Pengcheng He, Xiaodong Liu, Jianfeng Gao, and Weizhu Chen. 2021. Deberta: decoding-Enhanced Bert with Disentangled Attention. In Proceeding of the 9th International Conference on Learning Representations."},{"key":"e_1_3_2_2_18_1","volume-title":"LoRA: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685","author":"Hu Edward J","year":"2021","unstructured":"Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2021. LoRA: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685 (2021)."},{"key":"e_1_3_2_2_19_1","doi-asserted-by":"publisher","DOI":"10.1145\/3366423.3380027"},{"key":"e_1_3_2_2_20_1","volume-title":"Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, et al.","author":"Jiang Albert Q","year":"2023","unstructured":"Albert Q Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, et al. 2023. Mistral 7B. arXiv preprint arXiv:2310.06825 (2023)."},{"key":"e_1_3_2_2_21_1","doi-asserted-by":"publisher","DOI":"10.5555\/3692070.3692978"},{"key":"e_1_3_2_2_22_1","volume-title":"Advances in Neural Information Processing Systems","volume":"30","author":"Ke Guolin","year":"2017","unstructured":"Guolin Ke, Qi Meng, Thomas Finley, Taifeng Wang, Wei Chen, Weidong Ma, Qiwei Ye, and Tie-Yan Liu. 2017. Lightgbm: A highly efficient gradient boosting decision tree. Advances in Neural Information Processing Systems, Vol. 30 (2017)."},{"key":"e_1_3_2_2_23_1","unstructured":"Thomas N Kipf and Max Welling. 2017. Semi-supervised classification with graph convolutional networks. 
(2017)."},{"key":"e_1_3_2_2_24_1","volume-title":"Efficient sequence packing without cross-contamination: Accelerating large language models without impacting performance. arXiv preprint arXiv:2107.02027","author":"Krell Mario Michael","year":"2021","unstructured":"Mario Michael Krell, Matej Kosec, Sergio P Perez, and Andrew Fitzgibbon. 2021. Efficient sequence packing without cross-contamination: Accelerating large language models without impacting performance. arXiv preprint arXiv:2107.02027 (2021)."},{"key":"e_1_3_2_2_25_1","volume-title":"Proceedings of the 40th International Conference on Machine Learning. 19730-19742","author":"Li Junnan","year":"2023","unstructured":"Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. 2023a. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In Proceedings of the 40th International Conference on Machine Learning. 19730-19742."},{"key":"e_1_3_2_2_26_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.353"},{"key":"e_1_3_2_2_27_1","volume-title":"Towards general text embeddings with multi-stage contrastive learning. arXiv preprint arXiv:2308.03281","author":"Li Zehan","year":"2023","unstructured":"Zehan Li, Xin Zhang, Yanzhao Zhang, Dingkun Long, Pengjun Xie, and Meishan Zhang. 2023b. Towards general text embeddings with multi-stage contrastive learning. arXiv preprint arXiv:2308.03281 (2023)."},{"key":"e_1_3_2_2_28_1","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00638"},{"key":"e_1_3_2_2_29_1","volume-title":"Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692","author":"Liu Yinhan","year":"2019","unstructured":"Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining approach. 
arXiv preprint arXiv:1907.11692 (2019)."},{"key":"e_1_3_2_2_30_1","volume-title":"Fixing weight decay regularization in adam. CoRR","author":"Loshchilov Ilya","year":"2017","unstructured":"Ilya Loshchilov and Frank Hutter. 2017. Fixing weight decay regularization in adam. CoRR, Vol. abs\/1711.05101 (2017)."},{"key":"e_1_3_2_2_31_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.883"},{"key":"e_1_3_2_2_32_1","doi-asserted-by":"publisher","DOI":"10.1145\/3447548.3467350"},{"key":"e_1_3_2_2_33_1","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2021.3118815"},{"key":"e_1_3_2_2_34_1","volume-title":"Introducing meta llama 3: The most capable openly available llm to date. Meta AI","author":"Meta AI","year":"2024","unstructured":"AI Meta. 2024. Introducing meta llama 3: The most capable openly available llm to date. Meta AI (2024)."},{"key":"e_1_3_2_2_35_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.semeval-1.317"},{"key":"e_1_3_2_2_36_1","volume-title":"Searching for activation functions. arXiv preprint arXiv:1710.05941","author":"Ramachandran Prajit","year":"2017","unstructured":"Prajit Ramachandran, Barret Zoph, and Quoc V Le. 2017. Searching for activation functions. arXiv preprint arXiv:1710.05941 (2017)."},{"key":"e_1_3_2_2_37_1","doi-asserted-by":"publisher","DOI":"10.1145\/2517288.2517299"},{"key":"e_1_3_2_2_38_1","volume-title":"The semantic web: 15th international conference","author":"Schlichtkrull Michael Sejr","unstructured":"Michael Sejr Schlichtkrull, Thomas N. Kipf, Peter Bloem, Rianne van den Berg, Ivan Titov, and Max Welling. 2018. Modeling Relational Data with Graph Convolutional Networks. In The semantic web: 15th international conference, Vol. 10843."},{"key":"e_1_3_2_2_39_1","volume-title":"Glu variants improve transformer. arXiv preprint arXiv:2002.05202","author":"Shazeer Noam","year":"2020","unstructured":"Noam Shazeer. 2020. Glu variants improve transformer. 
arXiv preprint arXiv:2002.05202 (2020)."},{"key":"e_1_3_2_2_40_1","volume-title":"KDD 2024 OAG-Challenge Cup.","author":"Shen Ming","year":"2024","unstructured":"Ming Shen. 2024. An ensemble model with multi-scale features for incorrect assignment detection. In KDD 2024 OAG-Challenge Cup."},{"key":"e_1_3_2_2_41_1","first-page":"29628","article-title":"GADBench: Revisiting and Benchmarking Supervised Graph Anomaly Detection","volume":"36","author":"Tang Jianheng","year":"2023","unstructured":"Jianheng Tang, Fengrui Hua, Ziqi Gao, Peilin Zhao, and Jia Li. 2023. GADBench: Revisiting and Benchmarking Supervised Graph Anomaly Detection. In Advances in Neural Information Processing Systems, Vol. 36. 29628-29653.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_42_1","volume-title":"Proceeding of the 39th International Conference on Machine Learning","volume":"162","author":"Tang Jianheng","year":"2022","unstructured":"Jianheng Tang, Jiajin Li, Ziqi Gao, and Jia Li. 2022. Rethinking Graph Neural Networks for Anomaly Detection. In Proceeding of the 39th International Conference on Machine Learning, Vol. 162."},{"key":"e_1_3_2_2_43_1","doi-asserted-by":"publisher","DOI":"10.1145\/1401890.1402008"},{"key":"e_1_3_2_2_44_1","volume-title":"Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)."},{"key":"e_1_3_2_2_45_1","volume-title":"DELL: Generating Reactions and Explanations for LLM-Based Misinformation Detection. 
In Findings of the Association for Computational Linguistics.","author":"Wan Herun","year":"2024","unstructured":"Herun Wan, Shangbin Feng, Zhaoxuan Tan, Heng Wang, Yulia Tsvetkov, and Minnan Luo. 2024. DELL: Generating Reactions and Explanations for LLM-Based Misinformation Detection. In Findings of the Association for Computational Linguistics."},{"key":"e_1_3_2_2_46_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.cose.2017.11.013"},{"key":"e_1_3_2_2_47_1","volume-title":"KDD 2024 OAG-Challenge Cup.","author":"Yan Qiang","year":"2024","unstructured":"Qiang Yan and AsirAsir. 2024. Synergizing large language models and tree-based algorithms for author name disambiguation. In KDD 2024 OAG-Challenge Cup."},{"key":"e_1_3_2_2_48_1","unstructured":"An Yang Baosong Yang Binyuan Hui Bo Zheng Bowen Yu Chang Zhou Chengpeng Li Chengyuan Li Dayiheng Liu Fei Huang et al. 2024b. Qwen2 technical report. arXiv preprint arXiv:2407.10671 (2024)."},{"key":"e_1_3_2_2_49_1","unstructured":"An Yang Baosong Yang Beichen Zhang Binyuan Hui Bo Zheng Bowen Yu and et al. 2024c. Qwen2.5 Technical Report. CoRR Vol. abs\/2412.15115 (2024)."},{"key":"e_1_3_2_2_50_1","volume-title":"AD-LLM: Benchmarking Large Language Models for Anomaly Detection. CoRR","author":"Yang Tiankai","year":"2024","unstructured":"Tiankai Yang, Yi Nian, Shawn Li, Ruiyao Xu, Yuangang Li, Jiaqi Li, Zhuo Xiao, Xiyang Hu, Ryan A. Rossi, Kaize Ding, Xiaohu You, and Yue Zhao. 2024a. AD-LLM: Benchmarking Large Language Models for Anomaly Detection. CoRR, Vol. abs\/2412.11142 (2024)."},{"key":"e_1_3_2_2_51_1","volume-title":"Proceedings of the 5th International Conference on Learning Representations","author":"Zeng Aohan","year":"2022","unstructured":"Aohan Zeng, Xiao Liu, Zhengxiao Du, Zihan Wang, Hanyu Lai, Ming Ding, Zhuoyi Yang, Yifan Xu, Wendi Zheng, Xiao Xia, et al. 2022. Glm-130b: An open bilingual pre-trained model. 
Proceedings of the 5th International Conference on Learning Representations (2022)."},{"key":"e_1_3_2_2_52_1","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2022.3222168"},{"key":"e_1_3_2_2_53_1","doi-asserted-by":"publisher","DOI":"10.1145\/3292500.3330785"},{"key":"e_1_3_2_2_54_1","doi-asserted-by":"publisher","DOI":"10.1145\/3637528.3672354"},{"key":"e_1_3_2_2_55_1","volume-title":"KDD 2024 OAG-Challenge Cup.","author":"Zhang Xiaocheng","year":"2024","unstructured":"Xiaocheng Zhang, Yang Zhou, Haoru Chen, Mengjiao Bao, and Peng Yan. 2024b. Enhanced name disambiguation via iterative self-refining with LLMs. In KDD 2024 OAG-Challenge Cup."},{"key":"e_1_3_2_2_56_1","unstructured":"Wayne Xin Zhao Kun Zhou Junyi Li Tianyi Tang Xiaolei Wang Yupeng Hou Yingqian Min Beichen Zhang Junjie Zhang Zican Dong et al. 2023. A survey of large language models. arXiv preprint arXiv:2303.18223 (2023)."},{"key":"e_1_3_2_2_57_1","volume-title":"Proceeding of the 12th International Conference on Learning Representations.","author":"Zhuo Wei","year":"2024","unstructured":"Wei Zhuo, Zemin Liu, Bryan Hooi, Bingsheng He, Guang Tan, Rizal Fathony, and Jia Chen. 2024. Partitioning Message Passing for Graph Fraud Detection. In Proceeding of the 12th International Conference on Learning Representations."}],"event":{"name":"KDD '25: The 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining","location":"Toronto ON Canada","acronym":"KDD '25","sponsor":["SIGMOD ACM Special Interest Group on Management of Data","SIGKDD ACM Special Interest Group on Knowledge Discovery in Data"]},"container-title":["Proceedings of the 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining V.2"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3711896.3736993","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,16]],"date-time":"2025-08-16T14:33:20Z","timestamp":1755354800000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3711896.3736993"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8,3]]},"references-count":57,"alternative-id":["10.1145\/3711896.3736993","10.1145\/3711896"],"URL":"https:\/\/doi.org\/10.1145\/3711896.3736993","relation":{},"subject":[],"published":{"date-parts":[[2025,8,3]]},"assertion":[{"value":"2025-08-03","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}