{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,11]],"date-time":"2025-11-11T12:31:15Z","timestamp":1762864275565,"version":"build-2065373602"},"publisher-location":"New York, NY, USA","reference-count":33,"publisher":"ACM","content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,11,19]]},"DOI":"10.1145\/3736425.3772355","type":"proceedings-article","created":{"date-parts":[[2025,11,11]],"date-time":"2025-11-11T12:21:55Z","timestamp":1762863715000},"page":"389-393","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["SifterNet: Model-Agnostic Defense against Backdoor Attack in Vision Large Model"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0000-0058-2811","authenticated-orcid":false,"given":"Shaoye","family":"Luo","sequence":"first","affiliation":[{"name":"University of Chinese Academy of Sciences, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6659-7431","authenticated-orcid":false,"given":"Xinxin","family":"Fan","sequence":"additional","affiliation":[{"name":"Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7670-7729","authenticated-orcid":false,"given":"Quanliang","family":"Jing","sequence":"additional","affiliation":[{"name":"Institute of Computing Technology, Beijing, China, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-6317-497X","authenticated-orcid":false,"given":"Men","family":"Niu","sequence":"additional","affiliation":[{"name":"University of Chinese Academy of Sciences, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China, Beijing, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0302-5102","authenticated-orcid":false,"given":"Chi","family":"Lin","sequence":"additional","affiliation":[{"name":"Dalian University of Technology, Dalian, LiaoNing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-5201-180X","authenticated-orcid":false,"given":"Yunfeng","family":"Lu","sequence":"additional","affiliation":[{"name":"Beihang University, Beijing, China, Beijing, China"}]}],"member":"320","published-online":{"date-parts":[[2025,11,11]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"2019 IEEE International Conference on Image Processing, ICIP 2019","author":"Barni Mauro","year":"2019","unstructured":"Mauro Barni, Kassem Kallas, and Benedetta Tondi. [n. d.]. A New Backdoor Attack in CNNS by Training Set Corruption Without Label Poisoning. In 2019 IEEE International Conference on Image Processing, ICIP 2019, Taipei, Taiwan, September 22\u201325, 2019. 101\u2013105."},{"key":"e_1_3_2_1_2_1","volume-title":"DeepInspect: A Black-box Trojan Detection and Mitigation Framework for Deep Neural Networks. In Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence, IJCAI 2019","author":"Chen Huili","year":"2019","unstructured":"Huili Chen, Cheng Fu, Jishen Zhao, and Farinaz Koushanfar. [n. d.]. DeepInspect: A Black-box Trojan Detection and Mitigation Framework for Deep Neural Networks. In Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence, IJCAI 2019, Macao, China, August 10\u201316, 2019, Sarit Kraus (Ed.). 4658\u20134664."},{"key":"e_1_3_2_1_3_1","volume-title":"Targeted Backdoor Attacks on Deep Learning Systems Using Data Poisoning. CoRR abs\/1712.05526","author":"Chen Xinyun","year":"2017","unstructured":"Xinyun Chen, Chang Liu, Bo Li, Kimberly Lu, and Dawn Song. 2017. Targeted Backdoor Attacks on Deep Learning Systems Using Data Poisoning. 
CoRR abs\/1712.05526 (2017)."},{"key":"e_1_3_2_1_4_1","volume-title":"SentiNet: Detecting Localized Universal Attacks Against Deep Learning Systems. In 2020 IEEE Security and Privacy Workshops, SP Workshops","author":"Chou Edward","year":"2020","unstructured":"Edward Chou, Florian Tram\u00e8r, and Giancarlo Pellegrino. [n. d.]. SentiNet: Detecting Localized Universal Attacks Against Deep Learning Systems. In 2020 IEEE Security and Privacy Workshops, SP Workshops, San Francisco, CA, USA, May 21, 2020. 48\u201354."},{"key":"e_1_3_2_1_5_1","volume-title":"Februus: Input Purification Defense Against Trojan Attacks on Deep Neural Network Systems. In ACSAC '20: Annual Computer Security Applications Conference, Virtual Event \/ Austin, TX, USA, 7\u201311","author":"Doan Bao Gia","year":"2020","unstructured":"Bao Gia Doan, Ehsan Abbasnejad, and Damith C. Ranasinghe. [n. d.]. Februus: Input Purification Defense Against Trojan Attacks on Deep Neural Network Systems. In ACSAC '20: Annual Computer Security Applications Conference, Virtual Event \/ Austin, TX, USA, 7\u201311 December, 2020. 897\u2013912."},{"key":"e_1_3_2_1_6_1","volume-title":"Black-box Detection of Backdoor Attacks with Limited Information and Data. In 2021 IEEE\/CVF International Conference on Computer Vision, ICCV 2021","author":"Dong Yinpeng","year":"2021","unstructured":"Yinpeng Dong, Xiao Yang, Zhijie Deng, Tianyu Pang, Zihao Xiao, Hang Su, and Jun Zhu. [n. d.]. Black-box Detection of Backdoor Attacks with Limited Information and Data. In 2021 IEEE\/CVF International Conference on Computer Vision, ICCV 2021, Montreal, QC, Canada, October 10\u201317, 2021. 
16462\u201316471."},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1109\/TIFS.2023.3297056"},{"key":"e_1_3_2_1_8_1","volume-title":"Proceedings of the 35th Annual Computer Security Applications Conference, ACSAC 2019","author":"Gao Yansong","year":"2019","unstructured":"Yansong Gao, Chang Xu, Derui Wang, Shiping Chen, Damith Chinthana Ranasinghe, and Surya Nepal. [n. d.]. STRIP: a defence against trojan attacks on deep neural networks. In Proceedings of the 35th Annual Computer Security Applications Conference, ACSAC 2019, San Juan, PR, USA, December 09\u201313, 2019, David M. Balenson (Ed.). 113\u2013125."},{"key":"e_1_3_2_1_9_1","volume-title":"BadNets: Identifying Vulnerabilities in the Machine Learning Model Supply Chain. CoRR abs\/1708.06733","author":"Gu Tianyu","year":"2017","unstructured":"Tianyu Gu, Brendan Dolan-Gavitt, and Siddharth Garg. 2017. BadNets: Identifying Vulnerabilities in the Machine Learning Model Supply Chain. CoRR abs\/1708.06733 (2017)."},{"key":"e_1_3_2_1_10_1","volume-title":"The Eleventh International Conference on Learning Representations, ICLR 2023","author":"Guo Junfeng","year":"2023","unstructured":"Junfeng Guo, Yiming Li, Xun Chen, Hanqing Guo, Lichao Sun, and Cong Liu. [n. d.]. SCALE-UP: An Efficient Black-box Input-level Backdoor Detection via Analyzing Scaled Prediction Consistency. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1\u20135, 2023."},{"key":"e_1_3_2_1_11_1","unstructured":"D. O. Hebb. [n. d.]. 
The Organization of Behavior: A Neuropsychological Theory."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.79.8.2554"},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.81.10.3088"},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.xinn.2025.100948"},{"key":"e_1_3_2_1_15_1","volume-title":"The Tenth International Conference on Learning Representations, ICLR 2022","author":"Huang Kunzhe","year":"2022","unstructured":"Kunzhe Huang, Yiming Li, Baoyuan Wu, Zhan Qin, and Kui Ren. [n. d.]. Backdoor Defense via Decoupling the Training Process. In The Tenth International Conference on Learning Representations, ICLR 2022, Virtual Event, April 25\u201329, 2022."},{"key":"e_1_3_2_1_16_1","unstructured":"A. Krizhevsky and G. Hinton. 2009. Learning Multiple Layers of Features from Tiny Images. Technical Report. University of Toronto."},{"key":"e_1_3_2_1_17_1","volume-title":"BayBFed: Bayesian Backdoor Defense for Federated Learning. In 44th IEEE Symposium on Security and Privacy, SP 2023","author":"Kumari Kavita","year":"2023","unstructured":"Kavita Kumari, Phillip Rieger, Hossein Fereidooni, Murtuza Jadliwala, and Ahmad-Reza Sadeghi. [n. d.]. BayBFed: Bayesian Backdoor Defense for Federated Learning. In 44th IEEE Symposium on Security and Privacy, SP 2023, San Francisco, CA, USA, May 21\u201325, 2023. 737\u2013754."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1109\/5.726791"},{"key":"e_1_3_2_1_19_1","volume-title":"Urban Computing in the Era of Large Language Models. arXiv:2504.02009 abs\/2504.02009","author":"Li Zhonghang","year":"2025","unstructured":"Zhonghang Li, Lianghao Xia, Xubin Ren, Jiabin Tang, Tianyi Chen, Yong Xu, and Chao Huang. 2025. Urban Computing in the Era of Large Language Models. arXiv:2504.02009 abs\/2504.02009 (2025)."},{"key":"e_1_3_2_1_20_1","volume-title":"Detecting Backdoors During the Inference Stage Based on Corruption Robustness Consistency. 
In IEEE\/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2023","author":"Liu Xiaogeng","year":"2023","unstructured":"Xiaogeng Liu, Minghui Li, Haoyu Wang, Shengshan Hu, Dengpan Ye, Hai Jin, Libing Wu, and Chaowei Xiao. [n. d.]. Detecting Backdoors During the Inference Stage Based on Corruption Robustness Consistency. In IEEE\/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2023, Vancouver, BC, Canada, June 17\u201324, 2023. 16363\u201316372."},{"key":"e_1_3_2_1_21_1","volume-title":"WaNet - Imperceptible Warping-based Backdoor Attack. In 9th International Conference on Learning Representations, ICLR 2021","author":"Nguyen Tuan Anh","year":"2021","unstructured":"Tuan Anh Nguyen and Anh Tuan Tran. [n. d.]. WaNet - Imperceptible Warping-based Backdoor Attack. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3\u20137, 2021."},{"key":"e_1_3_2_1_22_1","volume-title":"Input-Aware Dynamic Backdoor Attack. In Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020","author":"Nguyen Tuan Anh","year":"2020","unstructured":"Tuan Anh Nguyen and Anh Tuan Tran. 2020. Input-Aware Dynamic Backdoor Attack. In Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6\u201312, 2020, virtual, Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin (Eds.)."},{"key":"e_1_3_2_1_23_1","volume-title":"FLAME: Taming Backdoors in Federated Learning. In 31st USENIX Security Symposium, USENIX Security 2022","author":"Nguyen Thien Duc","year":"2022","unstructured":"Thien Duc Nguyen, Phillip Rieger, Huili Chen, Hossein Yalame, Helen M\u00f6llering, Hossein Fereidooni, Samuel Marchal, Markus Miettinen, Azalia Mirhoseini, Shaza Zeitouni, Farinaz Koushanfar, Ahmad-Reza Sadeghi, and Thomas Schneider. [n. d.]. 
FLAME: Taming Backdoors in Federated Learning. In 31st USENIX Security Symposium, USENIX Security 2022, Boston, MA, USA, August 10\u201312, 2022, Kevin R. B. Butler and Kurt Thomas (Eds.). 1415\u20131432."},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.1109\/TSMC.1979.4310076"},{"key":"e_1_3_2_1_25_1","volume-title":"Towards A Proactive ML Approach for Detecting Backdoor Poison Samples. In 32nd USENIX Security Symposium, USENIX Security 2023","author":"Qi Xiangyu","year":"2023","unstructured":"Xiangyu Qi, Tinghao Xie, Jiachen T. Wang, Tong Wu, Saeed Mahloujifar, and Prateek Mittal. [n. d.]. Towards A Proactive ML Approach for Detecting Backdoor Poison Samples. In 32nd USENIX Security Symposium, USENIX Security 2023, Anaheim, CA, USA, August 9\u201311, 2023, Joseph A. Calandrino and Carmela Troncoso (Eds.). 1685\u20131702."},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"crossref","first-page":"103","DOI":"10.1109\/MDAT.2020.2968275","article-title":"Backdoor Suppression in Neural Networks using Input Fuzzing and Majority Voting","volume":"37","author":"Sarkar Esha","year":"2020","unstructured":"Esha Sarkar, Yousif Alkindi, and Michail Maniatakos. 2020. Backdoor Suppression in Neural Networks using Input Fuzzing and Majority Voting. IEEE Des. Test 37, 2 (2020), 103\u2013110.","journal-title":"IEEE Des. Test"},{"key":"e_1_3_2_1_27_1","volume-title":"Demon in the Variant: Statistical Analysis of DNNs for Robust Backdoor Contamination Detection. In 30th USENIX Security Symposium, USENIX Security 2021","author":"Tang Di","year":"2021","unstructured":"Di Tang, XiaoFeng Wang, Haixu Tang, and Kehuan Zhang. [n. d.]. Demon in the Variant: Statistical Analysis of DNNs for Robust Backdoor Contamination Detection. In 30th USENIX Security Symposium, USENIX Security 2021, August 11\u201313, 2021, Michael D. Bailey and Rachel Greenstadt (Eds.). 1541\u20131558."},{"key":"e_1_3_2_1_28_1","volume-title":"GTAT: Adversarial Training with Generated Triplets. 
In International Joint Conference on Neural Networks, IJCNN 2022","author":"Wang Baoli","year":"2022","unstructured":"Baoli Wang, Xinxin Fan, Quanliang Jing, Yueyang Su, Jingwei Li, and Jingping Bi. 2022. GTAT: Adversarial Training with Generated Triplets. In International Joint Conference on Neural Networks, IJCNN 2022, Padua, Italy, July 18\u201323, 2022. IEEE, 1\u20138."},{"key":"e_1_3_2_1_29_1","volume-title":"AdvCGAN: An Elastic and Covert Adversarial Examples Generating Framework. In International Joint Conference on Neural Networks, IJCNN 2021","author":"Wang Baoli","year":"2021","unstructured":"Baoli Wang, Xinxin Fan, Quanliang Jing, Haining Tan, and Jingping Bi. 2021. AdvCGAN: An Elastic and Covert Adversarial Examples Generating Framework. In International Joint Conference on Neural Networks, IJCNN 2021, Shenzhen China, July 18\u201322, 2021. IEEE, 1\u20138."},{"key":"e_1_3_2_1_30_1","volume-title":"Neural Cleanse: Identifying and Mitigating Backdoor Attacks in Neural Networks. In 2019 IEEE Symposium on Security and Privacy, SP 2019","author":"Wang Bolun","year":"2019","unstructured":"Bolun Wang, Yuanshun Yao, Shawn Shan, Huiying Li, Bimal Viswanath, Haitao Zheng, and Ben Y. Zhao. [n. d.]. Neural Cleanse: Identifying and Mitigating Backdoor Attacks in Neural Networks. In 2019 IEEE Symposium on Security and Privacy, SP 2019, San Francisco, CA, USA, May 19\u201323, 2019. 707\u2013723."},{"key":"e_1_3_2_1_31_1","volume-title":"Backdoor-Bench: A Comprehensive Benchmark and Analysis of Backdoor Learning. CoRR abs\/2407.19845","author":"Wu Baoyuan","year":"2024","unstructured":"Baoyuan Wu, Hongrui Chen, Mingda Zhang, Zihao Zhu, Shaokui Wei, Danni Yuan, Mingli Zhu, Ruotong Wang, Li Liu, and Chao Shen. 2024. Backdoor-Bench: A Comprehensive Benchmark and Analysis of Backdoor Learning. CoRR abs\/2407.19845 (2024)."},{"key":"e_1_3_2_1_32_1","volume-title":"Fashion-MNIST: a Novel Image Dataset for Benchmarking Machine Learning Algorithms. 
CoRR abs\/1708.07747","author":"Xiao Han","year":"2017","unstructured":"Han Xiao, Kashif Rasul, and Roland Vollgraf. 2017. Fashion-MNIST: a Novel Image Dataset for Benchmarking Machine Learning Algorithms. CoRR abs\/1708.07747 (2017)."},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1145\/2629592"}],"event":{"name":"BUILDSYS '25: 12th ACM International Conference on Systems for Energy-Efficient Buildings, Cities, and Transportation","location":"Colorado School of Mines Golden CO USA","acronym":"BUILDSYS '25","sponsor":["SIGEnergy ACM Special Interest Group on Energy Systems and Informatics"]},"container-title":["Proceedings of the 12th ACM International Conference on Systems for Energy-Efficient Buildings, Cities, and Transportation"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3736425.3772355","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,11]],"date-time":"2025-11-11T12:23:46Z","timestamp":1762863826000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3736425.3772355"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,11]]},"references-count":33,"alternative-id":["10.1145\/3736425.3772355","10.1145\/3736425"],"URL":"https:\/\/doi.org\/10.1145\/3736425.3772355","relation":{},"subject":[],"published":{"date-parts":[[2025,11,11]]},"assertion":[{"value":"2025-11-11","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}