{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,2]],"date-time":"2026-03-02T11:26:00Z","timestamp":1772450760574,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":98,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T00:00:00Z","timestamp":1733097600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"name":"Key Research and Development Projects of Jilin Province","award":["20240302090GX"],"award-info":[{"award-number":["20240302090GX"]}]},{"DOI":"10.13039\/501100006374","name":"National Science Foundation","doi-asserted-by":"publisher","award":["CNS-2302689, CNS-2308730, CNS-2319277, CMMI-2326341, ECCS-2216926, CNS-2241713, CNS-2331302 and CNS-2339686"],"award-info":[{"award-number":["CNS-2302689, CNS-2308730, CNS-2319277, CMMI-2326341, ECCS-2216926, CNS-2241713, CNS-2331302 and CNS-2339686"]}],"id":[{"id":"10.13039\/501100006374","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100006374","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62072208"],"award-info":[{"award-number":["62072208"]}],"id":[{"id":"10.13039\/501100006374","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,12,2]]},"DOI":"10.1145\/3658644.3690187","type":"proceedings-article","created":{"date-parts":[[2024,12,9]],"date-time":"2024-12-09T12:19:20Z","timestamp":1733746760000},"page":"2829-2843","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":10,"title":["Distributed Backdoor Attacks on Federated Graph Learning and Certified Defenses"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-9907-9980","authenticated-orcid":false,"given":"Yuxin","family":"Yang","sequence":"first","affiliation":[{"name":"College of Computer Science and Technology, Jilin University &amp; Department of Computer Science, Illinois Institute of Technology, Changchun, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7510-4718","authenticated-orcid":false,"given":"Qiang","family":"Li","sequence":"additional","affiliation":[{"name":"College of Computer Science and Technology, Jilin University, Changchun, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9785-7769","authenticated-orcid":false,"given":"Jinyuan","family":"Jia","sequence":"additional","affiliation":[{"name":"College of Information Sciences and Technology, The Pennsylvania State University, University Park, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4095-4506","authenticated-orcid":false,"given":"Yuan","family":"Hong","sequence":"additional","affiliation":[{"name":"School of Computing, University of Connecticut, Storrs, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5616-060X","authenticated-orcid":false,"given":"Binghui","family":"Wang","sequence":"additional","affiliation":[{"name":"Department of Computer Science, Illinois Institute of Technology, Chicago, USA"}]}],"member":"320","published-online":{"date-parts":[[2024,12,9]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"Jinheon Baek Wonyong Jeong Jiongdao Jin Jaehong Yoon and Sung Ju Hwang. 2023. Personalized subgraph federated learning. 
,{"key":"e_1_3_2_1_2_1","unstructured":"Eugene Bagdasaryan Andreas Veit Yiqing Hua Deborah Estrin and Vitaly Shmatikov. 2020. How to backdoor federated learning. In AISTATS."},{"key":"e_1_3_2_1_3_1","volume-title":"Emergence of scaling in random networks. Science","author":"Barab\u00e1si Albert-L\u00e1szl\u00f3","year":"1999","unstructured":"Albert-L\u00e1szl\u00f3 Barab\u00e1si and R\u00e9ka Albert. 1999. Emergence of scaling in random networks. Science (1999)."},{"key":"e_1_3_2_1_4_1","unstructured":"Arjun Nitin Bhagoji Supriyo Chakraborty Prateek Mittal and Seraphin Calo. 2019. Analyzing federated learning through an adversarial lens. In ICML."},{"key":"e_1_3_2_1_5_1","unstructured":"Aleksandar Bojchevski Johannes Gasteiger and Stephan G\u00fcnnemann. 2020. Efficient robustness certificates for discrete data: Sparsity-aware randomized smoothing for graphs images and more. In ICML."},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i8.16849"},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1109\/SP46215.2023.10179336"},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1109\/TIFS.2022.3212174"},{"key":"e_1_3_2_1_9_1","unstructured":"Kangjie Chen Yuxian Meng Xiaofei Sun Shangwei Guo Tianwei Zhang Jiwei Li and Chun Fan. 2022. BadPre: Task-agnostic Backdoor Attacks to Pre-trained NLP Foundation Models. In ICLR."},{"key":"e_1_3_2_1_10_1","volume-title":"Targeted backdoor attacks on deep learning systems using data poisoning. arXiv","author":"Chen Xinyun","year":"2017","unstructured":"Xinyun Chen, Chang Liu, Bo Li, Kimberly Lu, and Dawn Song. 2017. Targeted backdoor attacks on deep learning systems using data poisoning. arXiv (2017)."},{"key":"e_1_3_2_1_11_1","volume-title":"Hardware trojan attacks on neural networks. arXiv preprint arXiv:1806.05768","author":"Clements Joseph","year":"2018","unstructured":"Joseph Clements and Yingjie Lao. 2018. Hardware trojan attacks on neural networks. arXiv preprint arXiv:1806.05768 (2018)."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.5555\/648054.743935"},{"key":"e_1_3_2_1_13_1","first-page":"1","article-title":"Benchmarking Graph Neural Networks","volume":"24","author":"Dwivedi Vijay Prakash","year":"2023","unstructured":"Vijay Prakash Dwivedi, Chaitanya K Joshi, Anh Tuan Luu, Thomas Laurent, Yoshua Bengio, and Xavier Bresson. 2023. Benchmarking Graph Neural Networks. Journal of Machine Learning Research, Vol. 24, 43 (2023), 1--48.","journal-title":"Journal of Machine Learning Research"},{"key":"e_1_3_2_1_14_1","unstructured":"Federated learning on AWS with FedML: Health analytics without sharing sensitive data, Part 2. [n. d.]. https:\/\/aws.amazon.com\/blogs\/machine-learning\/part-2-federated-learning-on-aws-with-fedml-health-analytics-without-sharing-sensitive-data\/."},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"crossref","unstructured":"Leilei Gan Jiwei Li Tianwei Zhang Xiaoya Li Yuxian Meng Fei Wu Yi Yang Shangwei Guo and Chun Fan. 2022. Triggerless Backdoor Attack for NLP Tasks with Clean Labels. In NAACL-HLT.","DOI":"10.18653\/v1\/2022.naacl-main.214"},{"key":"e_1_3_2_1_16_1","volume-title":"Strip: A defence against trojan attacks on deep neural networks. In ACSAC.","author":"Gao Yansong","year":"2019","unstructured":"Yansong Gao, Change Xu, Derui Wang, Shiping Chen, Damith C Ranasinghe, and Surya Nepal. 2019. Strip: A defence against trojan attacks on deep neural networks. In ACSAC."}
,{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1109\/MCOM.012.2200596"},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1214\/aoms\/1177706098"},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.2501\/IJMR-2017-050"},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"crossref","unstructured":"Xueluan Gong Yanjiao Chen Jianshuo Dong and Qian Wang. 2022. ATTEQ-NN: Attention-based QoE-aware Evasive Backdoor Attacks. In NDSS.","DOI":"10.14722\/ndss.2022.24012"},{"key":"e_1_3_2_1_21_1","volume-title":"Backdoor attacks and defenses in federated learning: State-of-the-art, taxonomy, and future directions","author":"Gong Xueluan","year":"2022","unstructured":"Xueluan Gong, Yanjiao Chen, Qian Wang, and Weihan Kong. 2022. Backdoor attacks and defenses in federated learning: State-of-the-art, taxonomy, and future directions. IEEE Wireless Communications (2022)."},{"key":"e_1_3_2_1_22_1","volume-title":"Proc. of Machine Learning and Computer Security Workshop.","author":"Gu Tianyu","year":"2017","unstructured":"Tianyu Gu, Brendan Dolan-Gavitt, and Siddharth Garg. 2017. Badnets: Identifying vulnerabilities in the machine learning model supply chain. In Proc. of Machine Learning and Computer Security Workshop."},{"key":"e_1_3_2_1_23_1","volume-title":"MASTERKEY: Practical Backdoor Attack Against Speaker Verification Systems. In Annual International Conference on Mobile Computing and Networking. 1--15","author":"Guo Hanqing","year":"2023","unstructured":"Hanqing Guo, Xun Chen, Junfeng Guo, Li Xiao, and Qiben Yan. 2023. MASTERKEY: Practical Backdoor Attack Against Speaker Verification Systems. In Annual International Conference on Mobile Computing and Networking. 1--15."},{"key":"e_1_3_2_1_24_1","volume-title":"Tabor: A highly accurate approach to inspecting and restoring trojan backdoors in ai systems. arXiv preprint arXiv:1908.01763","author":"Guo Wenbo","year":"2019","unstructured":"Wenbo Guo, Lun Wang, Xinyu Xing, Min Du, and Dawn Song. 2019. Tabor: A highly accurate approach to inspecting and restoring trojan backdoors in ai systems. arXiv preprint arXiv:1908.01763 (2019)."},{"key":"e_1_3_2_1_25_1","unstructured":"Will Hamilton Zhitao Ying and Jure Leskovec. 2017. Inductive representation learning on large graphs. In NIPS."},{"key":"e_1_3_2_1_26_1","volume-title":"Fedgraphnn: A federated learning system and benchmark for graph neural networks. arXiv","author":"He Chaoyang","year":"2021","unstructured":"Chaoyang He, Keshav Balasubramanian, Emir Ceyani, Carl Yang, Han Xie, Lichao Sun, Lifang He, Liangwei Yang, Philip S Yu, Yu Rong, et al. 2021. Fedgraphnn: A federated learning system and benchmark for graph neural networks. arXiv (2021)."},{"key":"e_1_3_2_1_27_1","volume-title":"Spreadgnn: Decentralized multi-task federated learning for graph neural networks on molecular data. In AAAI.","author":"He Chaoyang","year":"2022","unstructured":"Chaoyang He, Emir Ceyani, Keshav Balasubramanian, Murali Annavaram, and Salman Avestimehr. 2022. Spreadgnn: Decentralized multi-task federated learning for graph neural networks on molecular data. In AAAI."},{"key":"e_1_3_2_1_28_1","volume-title":"Unicr: Universally approximated certified robustness via randomized smoothing. In ECCV.","author":"Hong Hanbin","year":"2022","unstructured":"Hanbin Hong, Binghui Wang, and Yuan Hong. 2022. Unicr: Universally approximated certified robustness via randomized smoothing. In ECCV."},{"key":"e_1_3_2_1_29_1","unstructured":"How AWS uses graph neural networks to meet customer needs. [n. d.]. https:\/\/www.amazon.science\/blog\/how-aws-uses-graph-neural-networks-to-meet-customer-needs\/."},{"key":"e_1_3_2_1_30_1","unstructured":"Jinyuan Jia Xiaoyu Cao and Neil Zhenqiang Gong. 2021. Intrinsic certified robustness of bagging against data poisoning attacks. In AAAI."},{"key":"e_1_3_2_1_31_1","unstructured":"Jinyuan Jia Yupei Liu Xiaoyu Cao and Neil Zhenqiang Gong. 2022. Certified robustness of nearest neighbors against data poisoning and backdoor attacks. In AAAI."},{"key":"e_1_3_2_1_32_1","unstructured":"Thomas N Kipf and Max Welling. 2017. Semi-Supervised Classification with Graph Convolutional Networks. In ICLR."},{"key":"e_1_3_2_1_33_1","unstructured":"Alexander Levine and Soheil Feizi. 2020. (De) Randomized smoothing for certifiable defense against patch attacks. In NeurIPS."},{"key":"e_1_3_2_1_34_1","unstructured":"Alexander Levine and Soheil Feizi. 2020. Deep Partition Aggregation: Provable Defenses against General Poisoning Attacks. In ICLR."},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"crossref","unstructured":"Alexander Levine and Soheil Feizi. 2020. Robustness certificates for sparse adversarial attacks by randomized ablation. In AAAI.","DOI":"10.1609\/aaai.v34i04.5888"},{"key":"e_1_3_2_1_36_1","volume-title":"Hu-fu: Hardware and software collaborative attack framework against neural networks","author":"Li Wenshuo","year":"2018","unstructured":"Wenshuo Li, Jincheng Yu, Xuefei Ning, Pengjun Wang, Qi Wei, Yu Wang, and Huazhong Yang. 2018. Hu-fu: Hardware and software collaborative attack framework against neural networks. In ISVLSI. IEEE."},{"key":"e_1_3_2_1_37_1","volume-title":"A review of graph neural networks and their applications in power systems","author":"Liao Wenlong","year":"2021","unstructured":"Wenlong Liao, Birgitte Bak-Jensen, Jayakrishnan Radhakrishna Pillai, Yuelong Wang, and Yusen Wang. 2021. A review of graph neural networks and their applications in power systems. Journal of Modern Power Systems and Clean Energy (2021)."},{"key":"e_1_3_2_1_38_1","volume-title":"Fine-pruning: Defending against backdooring attacks on deep neural networks. In RAID.","author":"Liu Kang","year":"2018","unstructured":"Kang Liu, Brendan Dolan-Gavitt, and Siddharth Garg. 2018. Fine-pruning: Defending against backdooring attacks on deep neural networks. In RAID."},{"key":"e_1_3_2_1_39_1","volume-title":"ABS: Scanning neural networks for back-doors by artificial brain stimulation. In SIGSAC.","author":"Liu Yingqi","year":"2019","unstructured":"Yingqi Liu, Wen-Chuan Lee, Guanhong Tao, Shiqing Ma, Yousra Aafer, and Xiangyu Zhang. 2019. ABS: Scanning neural networks for back-doors by artificial brain stimulation. In SIGSAC."},{"key":"e_1_3_2_1_40_1","unstructured":"Yingqi Liu Shiqing Ma Yousra Aafer Wen-Chuan Lee Juan Zhai Weihang Wang and Xiangyu Zhang. 2018. Trojaning attack on neural networks. In NDSS."},{"key":"e_1_3_2_1_41_1","unstructured":"Brendan McMahan Eider Moore Daniel Ramage Seth Hampson and Blaise Aguera y Arcas. 2017. Communication-efficient learning of deep networks from decentralized data. In Artificial Intelligence and Statistics."},{"key":"e_1_3_2_1_42_1","volume-title":"FLAME: Taming Backdoors in Federated Learning. In 31st USENIX Security Symposium.","author":"Nguyen Thien Duc","year":"2022","unstructured":"Thien Duc Nguyen, Phillip Rieger, Huili Chen, Hossein Yalame, Helen M\u00f6llering, Hossein Fereidooni, Samuel Marchal, Markus Miettinen, Azalia Mirhoseini, Shaza Zeitouni, et al. 2022. FLAME: Taming Backdoors in Federated Learning. In 31st USENIX Security Symposium."},{"key":"e_1_3_2_1_43_1","doi-asserted-by":"crossref","unstructured":"Mustafa Safa Ozdayi Murat Kantarcioglu and Yulia R Gel. 2021. Defending against backdoors in federated learning with robust learning rate. In AAAI Conference on Artificial Intelligence. 9268--9276.","DOI":"10.1609\/aaai.v35i10.17118"},{"key":"e_1_3_2_1_44_1","volume-title":"Towards Understanding How Self-training Tolerates Data Backdoor Poisoning. arXiv preprint arXiv:2301.08751","author":"Pal Soumyadeep","year":"2023","unstructured":"Soumyadeep Pal, Ren Wang, Yuguang Yao, and Sijia Liu. 2023. Towards Understanding How Self-training Tolerates Data Backdoor Poisoning. arXiv preprint arXiv:2301.08751 (2023)."},{"key":"e_1_3_2_1_45_1","volume-title":"31st USENIX Security Symposium (USENIX Security 22)","author":"Pan Xudong","year":"2022","unstructured":"Xudong Pan, Mi Zhang, Beina Sheng, Jiaming Zhu, and Min Yang. 2022. Hidden trigger backdoor attack on NLP models via linguistic style manipulation. In 31st USENIX Security Symposium (USENIX Security 22)."},{"key":"e_1_3_2_1_46_1","volume-title":"Textguard: Provable defense against backdoor attacks on text classification. In NDSS.","author":"Pei Hengzhi","year":"2023","unstructured":"Hengzhi Pei, Jinyuan Jia, Wenbo Guo, Bo Li, and Dawn Song. 2023. Textguard: Provable defense against backdoor attacks on text classification. In NDSS."},{"key":"e_1_3_2_1_47_1","volume-title":"Fedni: Federated graph learning with network inpainting for population-based disease prediction","author":"Peng Liang","year":"2022","unstructured":"Liang Peng, Nan Wang, Nicha Dvornek, Xiaofeng Zhu, and Xiaoxiao Li. 2022. Fedni: Federated graph learning with network inpainting for population-based disease prediction. IEEE Transactions on Medical Imaging (2022)."},{"key":"e_1_3_2_1_48_1","volume-title":"Hidden Killer: Invisible Textual Backdoor Attacks with Syntactic Trigger. In ACL.","author":"Qi Fanchao","year":"2021","unstructured":"Fanchao Qi, Mukai Li, Yangyi Chen, Zhengyan Zhang, Zhiyuan Liu, Yasheng Wang, and Maosong Sun. 2021. Hidden Killer: Invisible Textual Backdoor Attacks with Syntactic Trigger. In ACL."},{"key":"e_1_3_2_1_49_1","volume-title":"Deepsight: Mitigating backdoor attacks in federated learning through deep model inspection. arXiv preprint arXiv:2201.00763","author":"Rieger Phillip","year":"2022","unstructured":"Phillip Rieger, Thien Duc Nguyen, Markus Miettinen, and Ahmad-Reza Sadeghi. 2022. Deepsight: Mitigating backdoor attacks in federated learning through deep model inspection. arXiv preprint arXiv:2201.00763 (2022)."},{"key":"e_1_3_2_1_50_1","doi-asserted-by":"publisher","DOI":"10.1145\/3081333.3081366"},{"key":"e_1_3_2_1_51_1","unstructured":"Aniruddha Saha Akshayvarun Subramanya and Hamed Pirsiavash. 2020. Hidden trigger backdoor attacks. In AAAI."},{"key":"e_1_3_2_1_52_1","doi-asserted-by":"crossref","unstructured":"Sina Sajadmanesh and Daniel Gatica-Perez. 2021. Locally private graph neural networks. In CCS.","DOI":"10.1145\/3460120.3484565"},{"key":"e_1_3_2_1_53_1","doi-asserted-by":"crossref","unstructured":"Ahmed Salem Rui Wen Michael Backes Shiqing Ma and Yang Zhang. 2022. Dynamic Backdoor Attacks Against Machine Learning Models. In EuroSP.","DOI":"10.1109\/EuroSP53844.2022.00049"},{"key":"e_1_3_2_1_54_1","volume-title":"The graph neural network model","author":"Scarselli Franco","year":"2008","unstructured":"Franco Scarselli, Marco Gori, Ah Chung Tsoi, Markus Hagenbuchner, and Gabriele Monfardini. 2008. The graph neural network model. IEEE Transactions on Neural Networks, Vol. 20, 1 (2008), 61--80."},{"key":"e_1_3_2_1_55_1","volume-title":"Randomized message-interception smoothing: Gray-box certificates for graph neural networks. NeurIPS","author":"Scholten Yan","year":"2022","unstructured":"Yan Scholten, Jan Schuchardt, Simon Geisler, Aleksandar Bojchevski, and Stephan G\u00fcnnemann. 2022. Randomized message-interception smoothing: Gray-box certificates for graph neural networks. NeurIPS (2022)."},{"key":"e_1_3_2_1_56_1","doi-asserted-by":"publisher","DOI":"10.1145\/3495243.3560531"},{"key":"e_1_3_2_1_57_1","volume-title":"Can you really backdoor federated learning? arXiv","author":"Sun Ziteng","year":"2019","unstructured":"Ziteng Sun, Peter Kairouz, Ananda Theertha Suresh, and H Brendan McMahan. 2019. Can you really backdoor federated learning? arXiv (2019)."},{"key":"e_1_3_2_1_58_1","doi-asserted-by":"crossref","unstructured":"Yue Tan Yixin Liu Guodong Long Jing Jiang Qinghua Lu and Chengqi Zhang. 2023. Federated learning on non-iid graphs via structural knowledge sharing. In AAAI.","DOI":"10.1609\/aaai.v37i8.26187"},{"key":"e_1_3_2_1_59_1","volume-title":"2024 IEEE Symposium on Security and Privacy (SP).","author":"Tao Guanhong","year":"2023","unstructured":"Guanhong Tao, Zhenting Wang, Shiwei Feng, Guangyu Shen, Shiqing Ma, and Xiangyu Zhang. 2023. Distribution preserving backdoor attack in self-supervised learning. In 2024 IEEE Symposium on Security and Privacy (SP)."},{"key":"e_1_3_2_1_60_1","volume-title":"Estimating the number of clusters in a data set via the gap statistic. Journal of the Royal Statistical Society: Series B (Statistical Methodology)","author":"Tibshirani Robert","year":"2001","unstructured":"Robert Tibshirani, Guenther Walther, and Trevor Hastie. 2001. Estimating the number of clusters in a data set via the gap statistic. Journal of the Royal Statistical Society: Series B (Statistical Methodology) (2001)."},{"key":"e_1_3_2_1_61_1","unstructured":"Traffic prediction with advanced Graph Neural Networks. [n. d.]. https:\/\/deepmind.google\/discover\/blog\/traffic-prediction-with-advanced-graph-neural-networks\/."},{"key":"e_1_3_2_1_62_1","unstructured":"Brandon Tran Jerry Li and Aleksander Madry. 2018. Spectral signatures in backdoor attacks. In NeurIPS."},{"key":"e_1_3_2_1_63_1","volume-title":"CVPR Workshop.","author":"Wang Binghui","year":"2020","unstructured":"Binghui Wang, Xiaoyu Cao, Jinyuan Jia, and Neil Zhenqiang Gong. 2020. On Certifying Robustness against Backdoor Attacks via Randomized Smoothing. In CVPR Workshop."},{"key":"e_1_3_2_1_64_1","doi-asserted-by":"crossref","unstructured":"Binghui Wang Jinyuan Jia Xiaoyu Cao and Neil Zhenqiang Gong. 2021. Certified robustness of graph neural networks against adversarial structural perturbation. In KDD.","DOI":"10.1145\/3447548.3467295"},{"key":"e_1_3_2_1_65_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICDM54844.2022.00060"},{"key":"e_1_3_2_1_66_1","volume-title":"Neural cleanse: Identifying and mitigating backdoor attacks in neural networks","author":"Wang Bolun","unstructured":"Bolun Wang, Yuanshun Yao, Shawn Shan, Huiying Li, Bimal Viswanath, Haitao Zheng, and Ben Y Zhao. 2019. Neural cleanse: Identifying and mitigating backdoor attacks in neural networks. In IEEE S&P."},{"key":"e_1_3_2_1_67_1","unstructured":"Hongyi Wang Kartik Sreenivasan Shashank Rajput Harit Vishwakarma Saurabh Agarwal Jy-yong Sohn Kangwook Lee and Dimitris Papailiopoulos. 2020. Attack of the tails: Yes you really can backdoor federated learning. In NeurIPS."},{"key":"e_1_3_2_1_68_1","doi-asserted-by":"crossref","unstructured":"Ren Wang Gaoyuan Zhang Sijia Liu Pin-Yu Chen Jinjun Xiong and Meng Wang. 2020. Practical detection of trojan neural networks: Data-limited and data-free cases. In ECCV. 222--238.","DOI":"10.1007\/978-3-030-58592-1_14"},{"key":"e_1_3_2_1_69_1","volume-title":"Federatedscope-gnn: Towards a unified, comprehensive and efficient package for federated graph learning. In KDD.","author":"Wang Zhen","year":"2022","unstructured":"Zhen Wang, Weirui Kuang, Yuexiang Xie, Liuyi Yao, Yaliang Li, Bolin Ding, and Jingren Zhou. 2022. Federatedscope-gnn: Towards a unified, comprehensive and efficient package for federated graph learning. In KDD."},{"key":"e_1_3_2_1_70_1","volume-title":"Collective dynamics of 'small-world' networks. Nature","author":"Watts Duncan J","year":"1998","unstructured":"Duncan J Watts and Steven H Strogatz. 1998. Collective dynamics of 'small-world' networks. Nature (1998)."},{"key":"e_1_3_2_1_71_1","doi-asserted-by":"publisher","DOI":"10.1109\/SP46215.2023.10179451"},{"key":"e_1_3_2_1_72_1","volume-title":"Backdoor attacks against deep learning systems in the physical world. In CVPR.","author":"Wenger Emily","year":"2021","unstructured":"Emily Wenger, Josephine Passananti, Arjun Nitin Bhagoji, Yuanshun Yao, Haitao Zheng, and Ben Y Zhao. 2021. Backdoor attacks against deep learning systems in the physical world. In CVPR."},{"key":"e_1_3_2_1_73_1","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0228728"},{"key":"e_1_3_2_1_74_1","doi-asserted-by":"publisher","DOI":"10.1038\/s41467-022-30714-9"},{"key":"e_1_3_2_1_75_1","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2020.2978386"},{"key":"e_1_3_2_1_76_1","unstructured":"Zhaohan Xi Ren Pang Shouling Ji and Ting Wang. 2021. Graph backdoor. In USENIX Security."},{"key":"e_1_3_2_1_77_1","volume-title":"PatchGuard: A provably robust defense against adversarial patches via small receptive fields and masking. In USENIX Security.","author":"Xiang Chong","year":"2021","unstructured":"Chong Xiang, Arjun Nitin Bhagoji, Vikash Sehwag, and Prateek Mittal. 2021. PatchGuard: A provably robust defense against adversarial patches via small receptive fields and masking. In USENIX Security."},{"key":"e_1_3_2_1_78_1","volume-title":"Crfl: Certifiably robust federated learning against backdoor attacks. In ICML.","author":"Xie Chulin","year":"2021","unstructured":"Chulin Xie, Minghao Chen, Pin-Yu Chen, and Bo Li. 2021. Crfl: Certifiably robust federated learning against backdoor attacks. In ICML."},{"key":"e_1_3_2_1_79_1","volume-title":"Dba: Distributed backdoor attacks against federated learning. In ICLR.","author":"Xie Chulin","year":"2019","unstructured":"Chulin Xie, Keli Huang, Pin-Yu Chen, and Bo Li. 2019. Dba: Distributed backdoor attacks against federated learning. In ICLR."},{"key":"e_1_3_2_1_80_1","unstructured":"Chulin Xie Yunhui Long Pin-Yu Chen Qinbin Li Sanmi Koyejo and Bo Li. 2023. Unraveling the Connections between Privacy and Certified Robustness in Federated Learning Against Poisoning Attacks. In CCS."},{"key":"e_1_3_2_1_81_1","volume-title":"NeurIPS","volume":"34","author":"Xie Han","year":"2021","unstructured":"Han Xie, Jing Ma, Li Xiong, and Carl Yang. 2021. Federated graph classification over non-iid graphs. In NeurIPS, Vol. 34."},{"key":"e_1_3_2_1_82_1","doi-asserted-by":"publisher","DOI":"10.1145\/3543507.3583471"},{"key":"e_1_3_2_1_83_1","doi-asserted-by":"publisher","DOI":"10.1109\/TDSC.2022.3163397"},{"key":"e_1_3_2_1_84_1","doi-asserted-by":"crossref","unstructured":"Jing Xu Rui Wang Stefanos Koffas Kaitai Liang and Stjepan Picek. 2022. More is better (mostly): On the backdoor attacks in federated graph neural networks. In ACSAC. 684--698.","DOI":"10.1145\/3564625.3567999"},{"key":"e_1_3_2_1_85_1","unstructured":"Keyulu Xu Weihua Hu Jure Leskovec and Stefanie Jegelka. 2019. How Powerful are Graph Neural Networks?. In ICLR."},{"key":"e_1_3_2_1_86_1","unstructured":"Keyulu Xu Weihua Hu Jure Leskovec and Stefanie Jegelka. 2019. How powerful are graph neural networks?. In ICLR."},{"key":"e_1_3_2_1_87_1","volume-title":"33rd USENIX Security Symposium (USENIX Security 24)","author":"Yan Shenao","year":"2024","unstructured":"Shenao Yan, Shen Wang, Yue Duan, Hanbin Hong, Kiho Lee, Doowon Kim, and Yuan Hong. 2024. An LLM-Assisted Easy-to-Trigger Backdoor Attack on Code Completion Models: Injecting Disguised Vulnerabilities against Strong Detection. In 33rd USENIX Security Symposium (USENIX Security 24)."},{"key":"e_1_3_2_1_88_1","volume-title":"The Twelfth International Conference on Learning Representations.","author":"Yang Han","year":"2024","unstructured":"Han Yang, Binghui Wang, Jinyuan Jia, et al. 2024. GNNCert: Deterministic Certification of Graph Neural Networks against Adversarial Perturbations. In The Twelfth International Conference on Learning Representations."},{"key":"e_1_3_2_1_89_1","unstructured":"Yuanshun Yao Huiying Li Haitao Zheng and Ben Y Zhao. 2019. Latent Backdoor Attacks on Deep Neural Networks. In CCS."},{"key":"e_1_3_2_1_90_1","doi-asserted-by":"crossref","unstructured":"Rex Ying Ruining He Kaifeng Chen Pong Eksombatchai William L Hamilton and Jure Leskovec. 2018. Graph convolutional neural networks for web-scale recommender systems. In KDD.","DOI":"10.1145\/3219819.3219890"},{"key":"e_1_3_2_1_91_1","volume-title":"Subgraph federated learning with missing neighbor generation. NeurIPS","author":"Zhang Ke","year":"2021","unstructured":"Ke Zhang, Carl Yang, Xiaoxiao Li, Lichao Sun, and Siu Ming Yiu. 2021. Subgraph federated learning with missing neighbor generation. NeurIPS (2021)."},{"key":"e_1_3_2_1_92_1","volume-title":"Text-crs: A generalized certified robustness framework against textual adversarial attacks","author":"Zhang Xinyu","year":"2024","unstructured":"Xinyu Zhang, Hanbin Hong, Yuan Hong, Peng Huang, Binghui Wang, Zhongjie Ba, and Kui Ren. 2024. Text-crs: A generalized certified robustness framework against textual adversarial attacks. In IEEE SP."},{"key":"e_1_3_2_1_93_1","doi-asserted-by":"crossref","unstructured":"Zaixi Zhang Xiaoyu Cao Jinyuan Jia and Neil Zhenqiang Gong. 2022. FLDetector: Defending federated learning against model poisoning attacks via detecting malicious clients. In KDD.","DOI":"10.1145\/3534678.3539231"},{"key":"e_1_3_2_1_94_1","doi-asserted-by":"crossref","unstructured":"Zaixi Zhang Jinyuan Jia Binghui Wang and Neil Zhenqiang Gong. 2021. Backdoor attacks to graph neural networks. In SACMAT.","DOI":"10.1145\/3450569.3463560"},{"key":"e_1_3_2_1_95_1","volume-title":"Neurotoxin: Durable backdoors in federated learning. In ICML.","author":"Zhang Zhengming","year":"2022","unstructured":"Zhengming Zhang, Ashwinee Panda, Linyue Song, Yaoqing Yang, Michael Mahoney, Prateek Mittal, Ramchandran Kannan, and Joseph Gonzalez. 2022. Neurotoxin: Durable backdoors in federated learning. In ICML."},{"key":"e_1_3_2_1_96_1","doi-asserted-by":"crossref","unstructured":"Shihao Zhao Xingjun Ma Xiang Zheng James Bailey Jingjing Chen and Yu-Gang Jiang. 2020. Clean-label backdoor attacks on video recognition models. In CVPR. 14443--14452.","DOI":"10.1109\/CVPR42600.2020.01445"},{"key":"e_1_3_2_1_97_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.aiopen.2021.01.001"},{"key":"e_1_3_2_1_98_1","doi-asserted-by":"publisher","DOI":"10.14778\/3352063.3352127"}],"event":{"name":"CCS '24: ACM SIGSAC Conference on Computer and Communications Security","location":"Salt Lake City UT USA","acronym":"CCS '24","sponsor":["SIGSAC ACM Special Interest Group on Security, Audit, and Control"]},"container-title":["Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3658644.3690187","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3658644.3690187","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T05:56:56Z","timestamp":1755842216000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3658644.3690187"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,2]]},"references-count":98,"alternative-id":["10.1145\/3658644.3690187","10.1145\/3658644"],"URL":"https:\/\/doi.org\/10.1145\/3658644.3690187","relation":{},"subject":[],"published":{"date-parts":[[2024,12,2]]},"assertion":[{"value":"2024-12-09","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
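
The record above is a standard Crossref REST API "work" message. A minimal sketch of fetching and parsing it, assuming network access, Python 3 with the third-party requests package, and the public api.crossref.org works endpoint; the field names used below are taken from the record itself, though availability can vary across records:

import requests

# Fetch the Crossref metadata record for this paper by its DOI.
doi = "10.1145/3658644.3690187"
resp = requests.get(f"https://api.crossref.org/works/{doi}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # the "message" object shown above

print(work["title"][0])            # paper title
print(work["container-title"][0])  # proceedings name
print(", ".join(f"{a['given']} {a['family']}" for a in work["author"]))
print("pages:", work["page"], "| references:", work["references-count"])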