{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,21]],"date-time":"2025-08-21T16:48:05Z","timestamp":1755794885461,"version":"3.44.0"},"publisher-location":"New York, NY, USA","reference-count":94,"publisher":"ACM","license":[{"start":{"date-parts":[[2025,7,20]],"date-time":"2025-07-20T00:00:00Z","timestamp":1752969600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"DOI":"10.13039\/https:\/\/doi.org\/10.13039\/501100002855","name":"Ministry of Science and Technology of the People's Republic of China","doi-asserted-by":"publisher","award":["2023YFB2704903"],"award-info":[{"award-number":["2023YFB2704903"]}],"id":[{"id":"10.13039\/https:\/\/doi.org\/10.13039\/501100002855","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,7,20]]},"DOI":"10.1145\/3690624.3709385","type":"proceedings-article","created":{"date-parts":[[2025,4,4]],"date-time":"2025-04-04T18:44:43Z","timestamp":1743792283000},"page":"2791-2802","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["BackdoorMBTI: A Backdoor Learning Multimodal Benchmark Tool Kit for Backdoor Defense Evaluation"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0008-7942-7125","authenticated-orcid":false,"given":"Haiyang","family":"Yu","sequence":"first","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-5421-3064","authenticated-orcid":false,"given":"Tian","family":"Xie","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-4272-9604","authenticated-orcid":false,"given":"Jiaping","family":"Gui","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3961-5523","authenticated-orcid":false,"given":"Pengyang","family":"Wang","sequence":"additional","affiliation":[{"name":"University of Macau, Taipa, Macau SAR, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0945-8790","authenticated-orcid":false,"given":"Pengzhou","family":"Cheng","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4530-5118","authenticated-orcid":false,"given":"Ping","family":"Yi","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6107-7859","authenticated-orcid":false,"given":"Yue","family":"Wu","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]}],"member":"320","published-online":{"date-parts":[[2025,7,20]]},"reference":[{"key":"e_1_3_2_2_1_1","volume-title":"Dbpedia: A nucleus for a web of open data. In international semantic web conference","author":"Auer S\u00f6ren","year":"2007","unstructured":"S\u00f6ren Auer, Christian Bizer, Georgi Kobilarov, Jens Lehmann, Richard Cyganiak, and Zachary Ives. 2007. Dbpedia: A nucleus for a web of open data. In international semantic web conference. 
Springer, 722--735."},{"key":"e_1_3_2_2_2_1","volume-title":"30th USENIX Security Symposium (USENIX Security 21)","author":"Bagdasaryan Eugene","year":"2021","unstructured":"Eugene Bagdasaryan and Vitaly Shmatikov. 2021. Blind backdoors in deep learning models. In 30th USENIX Security Symposium (USENIX Security 21). 1505--1521."},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_3_1","DOI":"10.1007\/978-3-031-20065-6_7"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_4_1","DOI":"10.1109\/ICIP.2019.8802997"},{"key":"e_1_3_2_2_5_1","volume-title":"Detecting backdoor attacks on deep neural networks by activation clustering. arXiv preprint arXiv:1811.03728","author":"Chen Bryant","year":"2018","unstructured":"Bryant Chen, Wilka Carvalho, Nathalie Baracaldo, Heiko Ludwig, Benjamin Edwards, Taesung Lee, Ian Molloy, and Biplav Srivastava. 2018. Detecting backdoor attacks on deep neural networks by activation clustering. arXiv preprint arXiv:1811.03728 (2018)."},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_6_1","DOI":"10.1016\/j.neucom.2021.04.105"},{"key":"e_1_3_2_2_7_1","first-page":"8","article-title":"DeepInspect: A Black-box Trojan Detection and Mitigation Framework for Deep Neural Networks","volume":"2","author":"Chen Huili","year":"2019","unstructured":"Huili Chen, Cheng Fu, Jishen Zhao, and Farinaz Koushanfar. 2019. DeepInspect: A Black-box Trojan Detection and Mitigation Framework for Deep Neural Networks.. In IJCAI, Vol. 2. 8.","journal-title":"IJCAI"},{"key":"e_1_3_2_2_8_1","first-page":"9727","article-title":"Effective backdoor defense by exploiting sensitivity of poisoned samples","volume":"35","author":"Chen Weixin","year":"2022","unstructured":"Weixin Chen, Baoyuan Wu, and Haoqian Wang. 2022. Effective backdoor defense by exploiting sensitivity of poisoned samples. Advances in Neural Information Processing Systems, Vol. 35 (2022), 9727--9737.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_9_1","volume-title":"Targeted backdoor attacks on deep learning systems using data poisoning. arXiv preprint arXiv:1712.05526","author":"Chen Xinyun","year":"2017","unstructured":"Xinyun Chen, Chang Liu, Bo Li, Kimberly Lu, and Dawn Song. 2017. Targeted backdoor attacks on deep learning systems using data poisoning. arXiv preprint arXiv:1712.05526 (2017)."},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_10_1","DOI":"10.1109\/ACCESS.2021.3110239"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_11_1","DOI":"10.1109\/SPW50608.2020.00025"},{"key":"e_1_3_2_2_12_1","volume-title":"Proceedings of NeurIPS: Datasets and Benchmarks.","author":"Cui Ganqu","year":"2022","unstructured":"Ganqu Cui, Lifan Yuan, Bingxiang He, Yangyi Chen, Zhiyuan Liu, and Maosong Sun. 2022. A Unified Evaluation of Textual Backdoor Learning: Frameworks and Benchmarks. In Proceedings of NeurIPS: Datasets and Benchmarks."},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_13_1","DOI":"10.1109\/ACCESS.2019.2941376"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_14_1","DOI":"10.1109\/CVPR.2009.5206848"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_15_1","DOI":"10.1109\/ICCV48922.2021.01175"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_16_1","DOI":"10.1109\/ICASSP43922.2022.9746008"},{"unstructured":"Lilian Sanselme F\u00e9lix Martel. 2023. Text Noiser. 
https:\/\/github.com\/preligens-lab\/textnoisr.","key":"e_1_3_2_2_17_1"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_18_1","DOI":"10.1145\/3359789.3359790"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_19_1","DOI":"10.1109\/MCOM.012.2200596"},{"key":"e_1_3_2_2_20_1","volume-title":"Badnets: Identifying vulnerabilities in the machine learning model supply chain. arXiv preprint arXiv:1708.06733","author":"Gu Tianyu","year":"2017","unstructured":"Tianyu Gu, Brendan Dolan-Gavitt, and Siddharth Garg. 2017. Badnets: Identifying vulnerabilities in the machine learning model supply chain. arXiv preprint arXiv:1708.06733 (2017)."},{"key":"e_1_3_2_2_21_1","volume-title":"Scale-up: An efficient black-box input-level backdoor detection via analyzing scaled prediction consistency. arXiv preprint arXiv:2302.03251","author":"Guo Junfeng","year":"2023","unstructured":"Junfeng Guo, Yiming Li, Xun Chen, Hanqing Guo, Lichao Sun, and Cong Liu. 2023. Scale-up: An efficient black-box input-level backdoor detection via analyzing scaled prediction consistency. arXiv preprint arXiv:2302.03251 (2023)."},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_22_1","DOI":"10.1109\/OJSP.2022.3190213"},{"key":"e_1_3_2_2_23_1","volume-title":"Tabor: A highly accurate approach to inspecting and restoring trojan backdoors in ai systems. arXiv preprint arXiv:1908.01763","author":"Guo Wenbo","year":"2019","unstructured":"Wenbo Guo, Lun Wang, Xinyu Xing, Min Du, and Dawn Song. 2019. Tabor: A highly accurate approach to inspecting and restoring trojan backdoors in ai systems. arXiv preprint arXiv:1908.01763 (2019)."},{"key":"e_1_3_2_2_24_1","volume-title":"listen, and attack: Backdoor attacks against video action recognition. arXiv preprint arXiv:2301.00986","author":"Al Kader Hammoud Hasan Abed","year":"2023","unstructured":"Hasan Abed Al Kader Hammoud, Shuming Liu, Mohammed Alkhrashi, Fahad AlBalawi, and Bernard Ghanem. 2023. Look, listen, and attack: Backdoor attacks against video action recognition. arXiv preprint arXiv:2301.00986 (2023)."},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_25_1","DOI":"10.1109\/SP54263.2024.00031"},{"key":"e_1_3_2_2_26_1","volume-title":"International Conference on Machine Learning. PMLR, 4129--4139","author":"Hayase Jonathan","year":"2021","unstructured":"Jonathan Hayase, Weihao Kong, Raghav Somani, and Sewoong Oh. 2021. Spectre: Defending against backdoor attacks using robust statistics. In International Conference on Machine Learning. PMLR, 4129--4139."},{"key":"e_1_3_2_2_27_1","volume-title":"Backdoor defense via decoupling the training process. arXiv preprint arXiv:2202.03423","author":"Huang Kunzhe","year":"2022","unstructured":"Kunzhe Huang, Yiming Li, Baoyuan Wu, Zhan Qin, and Kui Ren. 2022. Backdoor defense via decoupling the training process. arXiv preprint arXiv:2202.03423 (2022)."},{"key":"e_1_3_2_2_28_1","volume-title":"Neuroninspect: Detecting backdoors in neural networks via output explanations. arXiv preprint arXiv:1911.07399","author":"Huang Xijie","year":"2019","unstructured":"Xijie Huang, Moustafa Alzantot, and Mani Srivastava. 2019. Neuroninspect: Detecting backdoors in neural networks via output explanations. arXiv preprint arXiv:1911.07399 (2019)."},{"key":"e_1_3_2_2_29_1","volume-title":"The trojai software framework: An opensource tool for embedding trojans into deep learning models. arXiv preprint arXiv:2003.07233","author":"Karra Kiran","year":"2020","unstructured":"Kiran Karra, Chace Ashcraft, and Neil Fendley. 2020. 
The trojai software framework: An opensource tool for embedding trojans into deep learning models. arXiv preprint arXiv:2003.07233 (2020)."},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_30_1","DOI":"10.1109\/ICASSP49357.2023.10096332"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_31_1","DOI":"10.1145\/3522783.3529523"},{"unstructured":"Alex Krizhevsky Geoffrey Hinton et al. 2009. Learning multiple layers of features from tiny images. (2009).","key":"e_1_3_2_2_32_1"},{"key":"e_1_3_2_2_33_1","volume-title":"Tiny imagenet visual recognition challenge. CS 231N","author":"Le Ya","year":"2015","unstructured":"Ya Le and Xuan Yang. 2015. Tiny imagenet visual recognition challenge. CS 231N, Vol. 7, 7 (2015), 3."},{"key":"e_1_3_2_2_34_1","volume-title":"2021 d. Backdoor attacks on pre-trained models by layerwise weight poisoning. arXiv preprint arXiv:2108.13888","author":"Li Linyang","year":"2021","unstructured":"Linyang Li, Demin Song, Xiaonan Li, Jiehang Zeng, Ruotian Ma, and Xipeng Qiu. 2021 d. Backdoor attacks on pre-trained models by layerwise weight poisoning. arXiv preprint arXiv:2108.13888 (2021)."},{"key":"e_1_3_2_2_35_1","volume-title":"Backdoor learning: A survey","author":"Li Yiming","year":"2022","unstructured":"Yiming Li, Yong Jiang, Zhifeng Li, and Shu-Tao Xia. 2022. Backdoor learning: A survey. IEEE Transactions on Neural Networks and Learning Systems (2022)."},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_36_1","DOI":"10.1109\/ICCV48922.2021.01615"},{"key":"e_1_3_2_2_37_1","first-page":"14900","article-title":"Anti-backdoor learning: Training clean models on poisoned data","volume":"34","author":"Li Yige","year":"2021","unstructured":"Yige Li, Xixiang Lyu, Nodens Koren, Lingjuan Lyu, Bo Li, and Xingjun Ma. 2021b. Anti-backdoor learning: Training clean models on poisoned data. Advances in Neural Information Processing Systems, Vol. 34 (2021), 14900--14912.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_38_1","volume-title":"Neural attention distillation: Erasing backdoor triggers from deep neural networks. arXiv preprint arXiv:2101.05930","author":"Li Yige","year":"2021","unstructured":"Yige Li, Xixiang Lyu, Nodens Koren, Lingjuan Lyu, Bo Li, and Xingjun Ma. 2021c. Neural attention distillation: Erasing backdoor triggers from deep neural networks. arXiv preprint arXiv:2101.05930 (2021)."},{"key":"e_1_3_2_2_39_1","volume-title":"BackdoorBox: A Python Toolbox for Backdoor Learning. In ICLR Workshop.","author":"Li Yiming","year":"2023","unstructured":"Yiming Li, Mengxi Ya, Yang Bai, Yong Jiang, and Shu-Tao Xia. 2023. BackdoorBox: A Python Toolbox for Backdoor Learning. In ICLR Workshop."},{"key":"e_1_3_2_2_40_1","volume-title":"2021 e. Backdoor attack in the physical world. arXiv preprint arXiv:2104.02361","author":"Li Yiming","year":"2021","unstructured":"Yiming Li, Tongqing Zhai, Yong Jiang, Zhifeng Li, and Shu-Tao Xia. 2021 e. Backdoor attack in the physical world. arXiv preprint arXiv:2104.02361 (2021)."},{"key":"e_1_3_2_2_41_1","volume-title":"2021 f. Backdoor attack in the physical world. arXiv preprint arXiv:2104.02361","author":"Li Yiming","year":"2021","unstructured":"Yiming Li, Tongqing Zhai, Yong Jiang, Zhifeng Li, and Shu-Tao Xia. 2021 f. Backdoor attack in the physical world. 
arXiv preprint arXiv:2104.02361 (2021)."},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_42_1","DOI":"10.1007\/978-3-030-00470-5_13"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_43_1","DOI":"10.1145\/3503161.3548261"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_44_1","DOI":"10.1145\/3319535.3363216"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_45_1","DOI":"10.14722\/ndss.2018.23291"},{"key":"e_1_3_2_2_46_1","volume-title":"Proceedings, Part X 16","author":"Liu Yunfei","year":"2020","unstructured":"Yunfei Liu, Xingjun Ma, James Bailey, and Feng Lu. 2020. Reflection backdoor: A natural backdoor attack on deep neural networks. In Computer Vision--ECCV 2020: 16th European Conference, Glasgow, UK, August 23--28, 2020, Proceedings, Part X 16. Springer, 182--199."},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_47_1","DOI":"10.1109\/ICCD.2017.16"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_48_1","DOI":"10.1109\/ICCV.2015.425"},{"key":"e_1_3_2_2_49_1","volume-title":"32nd USENIX Security Symposium (USENIX Security 23)","author":"Lv Peizhuo","year":"2023","unstructured":"Peizhuo Lv, Chang Yue, Ruigang Liang, Yunfei Yang, Shengzhi Zhang, Hualong Ma, and Kai Chen. 2023. A data-free backdoor injection approach in neural networks. In 32nd USENIX Security Symposium (USENIX Security 23). 2671--2688."},{"unstructured":"Edward Ma. 2019. NLP Augmentation. https:\/\/github.com\/makcedward\/nlpaug.","key":"e_1_3_2_2_50_1"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_51_1","DOI":"10.14722\/ndss.2019.23415"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_52_1","DOI":"10.5555\/2002472.2002491"},{"key":"e_1_3_2_2_53_1","volume-title":"Joon Son Chung, and Andrew Zisserman","author":"Nagrani Arsha","year":"2017","unstructured":"Arsha Nagrani, Joon Son Chung, and Andrew Zisserman. 2017. Voxceleb: a large-scale speaker identification dataset. arXiv preprint arXiv:1706.08612 (2017)."},{"key":"e_1_3_2_2_54_1","volume-title":"Wanet--imperceptible warping-based backdoor attack. arXiv preprint arXiv:2102.10369","author":"Nguyen Anh","year":"2021","unstructured":"Anh Nguyen and Anh Tran. 2021. Wanet--imperceptible warping-based backdoor attack. arXiv preprint arXiv:2102.10369 (2021)."},{"key":"e_1_3_2_2_55_1","first-page":"3454","article-title":"Input-aware dynamic backdoor attack","volume":"33","author":"Nguyen Tuan Anh","year":"2020","unstructured":"Tuan Anh Nguyen and Anh Tran. 2020. Input-aware dynamic backdoor attack. Advances in Neural Information Processing Systems, Vol. 33 (2020), 3454--3464.","journal-title":"Advances in Neural Information Processing Systems"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_56_1","DOI":"10.1109\/EuroSP53844.2022.00048"},{"key":"e_1_3_2_2_57_1","volume-title":"Onion: A simple and effective defense against textual backdoor attacks. arXiv preprint arXiv:2011.10369","author":"Qi Fanchao","year":"2020","unstructured":"Fanchao Qi, Yangyi Chen, Mukai Li, Yuan Yao, Zhiyuan Liu, and Maosong Sun. 2020. Onion: A simple and effective defense against textual backdoor attacks. arXiv preprint arXiv:2011.10369 (2020)."},{"key":"e_1_3_2_2_58_1","volume-title":"Hidden killer: Invisible textual backdoor attacks with syntactic trigger. arXiv preprint arXiv:2105.12400","author":"Qi Fanchao","year":"2021","unstructured":"Fanchao Qi, Mukai Li, Yangyi Chen, Zhengyan Zhang, Zhiyuan Liu, Yasheng Wang, and Maosong Sun. 2021. Hidden killer: Invisible textual backdoor attacks with syntactic trigger. 
arXiv preprint arXiv:2105.12400 (2021)."},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_59_1","DOI":"10.1109\/CVPR52688.2022.01299"},{"key":"e_1_3_2_2_60_1","volume-title":"32nd USENIX Security Symposium (USENIX Security 23)","author":"Qi Xiangyu","year":"2023","unstructured":"Xiangyu Qi, Tinghao Xie, Jiachen T Wang, Tong Wu, Saeed Mahloujifar, and Prateek Mittal. 2023. Towards a proactive {ML} approach for detecting backdoor poison samples. In 32nd USENIX Security Symposium (USENIX Security 23). 1685--1702."},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_61_1","DOI":"10.1145\/3433210.3453108"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_62_1","DOI":"10.1609\/aaai.v34i07.6871"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_63_1","DOI":"10.1109\/EuroSP53844.2022.00049"},{"key":"e_1_3_2_2_64_1","volume-title":"Fine-tuning is all you need to mitigate backdoor attacks. arXiv preprint arXiv:2212.09067","author":"Sha Zeyang","year":"2022","unstructured":"Zeyang Sha, Xinlei He, Pascal Berrang, Mathias Humbert, and Yang Zhang. 2022. Fine-tuning is all you need to mitigate backdoor attacks. arXiv preprint arXiv:2212.09067 (2022)."},{"key":"e_1_3_2_2_65_1","volume-title":"Poison frogs! targeted clean-label poisoning attacks on neural networks. Advances in neural information processing systems","author":"Shafahi Ali","year":"2018","unstructured":"Ali Shafahi, W Ronny Huang, Mahyar Najibi, Octavian Suciu, Christoph Studer, Tudor Dumitras, and Tom Goldstein. 2018. Poison frogs! targeted clean-label poisoning attacks on neural networks. Advances in neural information processing systems, Vol. 31 (2018)."},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_66_1","DOI":"10.18653\/v1\/D13-1170"},{"key":"e_1_3_2_2_67_1","first-page":"19165","article-title":"Sleeper agent: Scalable hidden trigger backdoors for neural networks trained from scratch","volume":"35","author":"Souri Hossein","year":"2022","unstructured":"Hossein Souri, Liam Fowl, Rama Chellappa, Micah Goldblum, and Tom Goldstein. 2022. Sleeper agent: Scalable hidden trigger backdoors for neural networks trained from scratch. Advances in Neural Information Processing Systems, Vol. 35 (2022), 19165--19178.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_68_1","volume-title":"Man vs. computer: Benchmarking machine learning algorithms for traffic sign recognition. Neural networks","author":"Stallkamp Johannes","year":"2012","unstructured":"Johannes Stallkamp, Marc Schlipsing, Jan Salmen, and Christian Igel. 2012. Man vs. computer: Benchmarking machine learning algorithms for traffic sign recognition. Neural networks, Vol. 32 (2012), 323--332."},{"key":"e_1_3_2_2_69_1","volume-title":"30th USENIX Security Symposium (USENIX Security 21)","author":"Tang Di","year":"2021","unstructured":"Di Tang, XiaoFeng Wang, Haixu Tang, and Kehuan Zhang. 2021. Demon in the variant: Statistical analysis of {DNNs} for robust backdoor contamination detection. In 30th USENIX Security Symposium (USENIX Security 21). 1541--1558."},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_70_1","DOI":"10.1145\/3394486.3403064"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_71_1","DOI":"10.1109\/SP46214.2022.9833688"},{"key":"e_1_3_2_2_72_1","volume-title":"Spectral signatures in backdoor attacks. Advances in neural information processing systems","author":"Tran Brandon","year":"2018","unstructured":"Brandon Tran, Jerry Li, and Aleksander Madry. 2018. Spectral signatures in backdoor attacks. 
Advances in neural information processing systems, Vol. 31 (2018)."},{"unstructured":"Alexander Turner Dimitris Tsipras and Aleksander Madry. 2018. Clean-label backdoor attacks. (2018).","key":"e_1_3_2_2_73_1"},{"key":"e_1_3_2_2_74_1","volume-title":"Label-consistent backdoor attacks. arXiv preprint arXiv:1912.02771","author":"Turner Alexander","year":"2019","unstructured":"Alexander Turner, Dimitris Tsipras, and Aleksander Madry. 2019. Label-consistent backdoor attacks. arXiv preprint arXiv:1912.02771 (2019)."},{"key":"e_1_3_2_2_75_1","first-page":"451","article-title":"GTZAN Dataset","volume":"2","author":"Tzanetakis George","year":"2002","unstructured":"George Tzanetakis and Perry Cook. 2002. GTZAN Dataset. Journal of Machine Learning Research, Vol. 2 (2002), 451--452. http:\/\/marsyas.info\/","journal-title":"Journal of Machine Learning Research"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_76_1","DOI":"10.1109\/TR.2022.3159784"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_77_1","DOI":"10.1109\/SP.2019.00031"},{"key":"e_1_3_2_2_78_1","first-page":"36396","article-title":"Training with more confidence: Mitigating injected and natural backdoors during training","volume":"35","author":"Wang Zhenting","year":"2022","unstructured":"Zhenting Wang, Hailun Ding, Juan Zhai, and Shiqing Ma. 2022a. Training with more confidence: Mitigating injected and natural backdoors during training. Advances in Neural Information Processing Systems, Vol. 35 (2022), 36396--36410.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_79_1","first-page":"9738","article-title":"Rethinking the reverse-engineering of trojan triggers","volume":"35","author":"Wang Zhenting","year":"2022","unstructured":"Zhenting Wang, Kai Mei, Hailun Ding, Juan Zhai, and Shiqing Ma. 2022b. Rethinking the reverse-engineering of trojan triggers. Advances in Neural Information Processing Systems, Vol. 35 (2022), 9738--9753.","journal-title":"Advances in Neural Information Processing Systems"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_80_1","DOI":"10.1109\/CVPR52688.2022.01465"},{"key":"e_1_3_2_2_81_1","volume-title":"Speech commands: A dataset for limited-vocabulary speech recognition. arXiv preprint arXiv:1804.03209","author":"Warden Pete","year":"2018","unstructured":"Pete Warden. 2018. Speech commands: A dataset for limited-vocabulary speech recognition. arXiv preprint arXiv:1804.03209 (2018)."},{"key":"e_1_3_2_2_82_1","volume-title":"BackdoorBench: A Comprehensive Benchmark of Backdoor Learning. In Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track.","author":"Wu Baoyuan","year":"2022","unstructured":"Baoyuan Wu, Hongrui Chen, Mingda Zhang, Zihao Zhu, Shaokui Wei, Danni Yuan, and Chao Shen. 2022. BackdoorBench: A Comprehensive Benchmark of Backdoor Learning. In Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track."},{"key":"e_1_3_2_2_83_1","first-page":"16913","article-title":"Adversarial neuron pruning purifies backdoored deep models","volume":"34","author":"Wu Dongxian","year":"2021","unstructured":"Dongxian Wu and Yisen Wang. 2021. Adversarial neuron pruning purifies backdoored deep models. Advances in Neural Information Processing Systems, Vol. 
34 (2021), 16913--16925.","journal-title":"Advances in Neural Information Processing Systems"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_84_1","DOI":"10.1109\/SP40001.2021.00034"},{"key":"e_1_3_2_2_85_1","volume-title":"Bite: Textual backdoor attacks with iterative trigger injection. arXiv preprint arXiv:2205.12700","author":"Yan Jun","year":"2022","unstructured":"Jun Yan, Vansh Gupta, and Xiang Ren. 2022. Bite: Textual backdoor attacks with iterative trigger injection. arXiv preprint arXiv:2205.12700 (2022)."},{"key":"e_1_3_2_2_86_1","volume-title":"Rap: Robustness-aware perturbations for defending against backdoor attacks on nlp models. arXiv preprint arXiv:2110.07831","author":"Yang Wenkai","year":"2021","unstructured":"Wenkai Yang, Yankai Lin, Peng Li, Jie Zhou, and Xu Sun. 2021. Rap: Robustness-aware perturbations for defending against backdoor attacks on nlp models. arXiv preprint arXiv:2110.07831 (2021)."},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_87_1","DOI":"10.1145\/3319535.3354209"},{"key":"e_1_3_2_2_88_1","volume-title":"Adversarial unlearning of backdoors via implicit hypergradient. arXiv preprint arXiv:2110.03735","author":"Zeng Yi","year":"2021","unstructured":"Yi Zeng, Si Chen, Won Park, Z Morley Mao, Ming Jin, and Ruoxi Jia. 2021a. Adversarial unlearning of backdoors via implicit hypergradient. arXiv preprint arXiv:2110.03735 (2021)."},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_89_1","DOI":"10.1109\/ICCV48922.2021.01616"},{"key":"e_1_3_2_2_90_1","volume-title":"Character-level convolutional networks for text classification. Advances in neural information processing systems","author":"Zhang Xiang","year":"2015","unstructured":"Xiang Zhang, Junbo Zhao, and Yann LeCun. 2015. Character-level convolutional networks for text classification. Advances in neural information processing systems, Vol. 28 (2015)."},{"key":"e_1_3_2_2_91_1","volume-title":"Karthikeyan Natesan Ramamurthy, and Xue Lin","author":"Zhao Pu","year":"2020","unstructured":"Pu Zhao, Pin-Yu Chen, Payel Das, Karthikeyan Natesan Ramamurthy, and Xue Lin. 2020. Bridging mode connectivity in loss landscapes and adversarial robustness. arXiv preprint arXiv:2005.00060 (2020)."},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_92_1","DOI":"10.1007\/978-3-031-20065-6_11"},{"key":"e_1_3_2_2_93_1","first-page":"18667","article-title":"Pre-activation Distributions Expose Backdoor Neurons","volume":"35","author":"Zheng Runkai","year":"2022","unstructured":"Runkai Zheng, Rongjun Tang, Jianze Li, and Li Liu. 2022b. Pre-activation Distributions Expose Backdoor Neurons. Advances in Neural Information Processing Systems, Vol. 
35 (2022), 18667--18680.","journal-title":"Advances in Neural Information Processing Systems"},{"doi-asserted-by":"publisher","key":"e_1_3_2_2_94_1","DOI":"10.1109\/SP46215.2023.10351028"}],"event":{"sponsor":["SIGMOD ACM Special Interest Group on Management of Data","SIGKDD ACM Special Interest Group on Knowledge Discovery in Data"],"acronym":"KDD '25","name":"KDD '25: The 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining","location":"Toronto ON Canada"},"container-title":["Proceedings of the 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining V.1"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3690624.3709385","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3690624.3709385","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,16]],"date-time":"2025-08-16T15:42:51Z","timestamp":1755358971000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3690624.3709385"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,7,20]]},"references-count":94,"alternative-id":["10.1145\/3690624.3709385","10.1145\/3690624"],"URL":"https:\/\/doi.org\/10.1145\/3690624.3709385","relation":{},"subject":[],"published":{"date-parts":[[2025,7,20]]},"assertion":[{"value":"2025-07-20","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
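For reference, this record has the shape returned by Crossref's public REST API (GET https://api.crossref.org/works/{DOI}). A minimal sketch of retrieving and inspecting the same record, assuming network access and only the Python standard library; the User-Agent mailto address is a placeholder, not part of the record:

```python
import json
import urllib.request

# DOI taken from the record above; the endpoint is Crossref's public REST API,
# which returns a JSON envelope with the work metadata under "message".
DOI = "10.1145/3690624.3709385"
url = f"https://api.crossref.org/works/{DOI}"

# Crossref asks polite clients to identify themselves; the mailto here is a
# hypothetical placeholder to replace with a real contact address.
req = urllib.request.Request(
    url, headers={"User-Agent": "metadata-check/0.1 (mailto:you@example.org)"}
)

with urllib.request.urlopen(req) as resp:
    record = json.load(resp)

work = record["message"]  # payload for message-type == "work"
print(work["title"][0])               # title is stored as a list of strings
print(work["DOI"], work["type"])      # 10.1145/3690624.3709385 proceedings-article
for a in work.get("author", []):      # author names are split into given/family parts
    print(" ", a.get("given", ""), a.get("family", ""))
print("references:", work.get("reference-count"))
```

Running this should print the paper title, DOI and type, the seven author names, and a reference count of 94, matching the fields in the record above.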