{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,20]],"date-time":"2026-03-20T16:16:00Z","timestamp":1774023360706,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":59,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,11,21]],"date-time":"2023-11-21T00:00:00Z","timestamp":1700524800000},"content-version":"vor","delay-in-days":6,"URL":"http:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"DOI":"10.13039\/100000001","name":"NSF (National Science Foundation)","doi-asserted-by":"publisher","award":["OAC-2239622"],"award-info":[{"award-number":["OAC-2239622"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000001","name":"National Science Foundation","doi-asserted-by":"publisher","award":["IIS-2312794"],"award-info":[{"award-number":["IIS-2312794"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,11,15]]},"DOI":"10.1145\/3576915.3616617","type":"proceedings-article","created":{"date-parts":[[2023,11,21]],"date-time":"2023-11-21T12:35:13Z","timestamp":1700570113000},"page":"771-785","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":154,"title":["Narcissus: A Practical Clean-Label Backdoor Attack with Limited Information"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6901-9194","authenticated-orcid":false,"given":"Yi","family":"Zeng","sequence":"first","affiliation":[{"name":"Virginia Tech, Blacksburg, VA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-5925-7535","authenticated-orcid":false,"given":"Minzhou","family":"Pan","sequence":"additional","affiliation":[{"name":"Virginia Tech, Blacksburg, VA, 
USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-6094-2473","authenticated-orcid":false,"given":"Hoang Anh","family":"Just","sequence":"additional","affiliation":[{"name":"Virginia Tech, Blacksburg, VA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3170-4994","authenticated-orcid":false,"given":"Lingjuan","family":"Lyu","sequence":"additional","affiliation":[{"name":"Sony AI, Tokyo, Japan"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1004-0140","authenticated-orcid":false,"given":"Meikang","family":"Qiu","sequence":"additional","affiliation":[{"name":"Augusta University, Augusta, GA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9662-9556","authenticated-orcid":false,"given":"Ruoxi","family":"Jia","sequence":"additional","affiliation":[{"name":"Virginia Tech, Blacksburg, VA, USA"}]}],"member":"320","published-online":{"date-parts":[[2023,11,21]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"NeurIPS","volume":"24","author":"Abernethy Jacob D","year":"2011","unstructured":"Jacob D Abernethy and Rafael Frongillo. 2011. A collaborative mechanism for crowdsourcing prediction problems. NeurIPS, Vol. 24 (2011)."},{"key":"e_1_3_2_1_2_1","first-page":"21428","article-title":"Geometric dataset distances via optimal transport","volume":"33","author":"Alvarez-Melis David","year":"2020","unstructured":"David Alvarez-Melis and Nicolo Fusi. 2020. Geometric dataset distances via optimal transport. NeurIPS, Vol. 33 (2020), 21428--21439.","journal-title":"NeurIPS"},{"key":"e_1_3_2_1_3_1","unstructured":"Anish Athalye Logan Engstrom Andrew Ilyas and Kevin Kwok. 2018. Synthesizing robust adversarial examples. In ICML. PMLR 284--293."},{"key":"e_1_3_2_1_4_1","volume-title":"Neural networks: Tricks of the trade","author":"Bottou L\u00e9on","unstructured":"L\u00e9on Bottou. 2012. Stochastic gradient descent tricks. In Neural networks: Tricks of the trade. Springer, 421--436."},{"key":"e_1_3_2_1_5_1","unstructured":"Xinyun Chen Chang Liu Bo Li Kimberly Lu and Dawn Song. 2017. 
Targeted Backdoor Attacks on Deep Learning Systems Using Data Poisoning. arxiv: 1712.05526 [cs.CR]"},{"key":"e_1_3_2_1_6_1","volume-title":"Improving black-box adversarial attacks with a transfer-based prior. NeurIPS","author":"Cheng Shuyu","year":"2019","unstructured":"Shuyu Cheng, Yinpeng Dong, Tianyu Pang, Hang Su, and Jun Zhu. 2019. Improving black-box adversarial attacks with a transfer-based prior. NeurIPS (2019)."},{"key":"e_1_3_2_1_7_1","unstructured":"Alexey Dosovitskiy Lucas Beyer Alexander Kolesnikov Dirk Weissenborn Xiaohua Zhai Thomas Unterthiner Mostafa Dehghani Matthias Minderer Georg Heigold Sylvain Gelly Jakob Uszkoreit and Neil Houlsby. 2021. An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale. In ICLR."},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1145\/3359789.3359790"},{"key":"e_1_3_2_1_9_1","unstructured":"Gregory Griffin Alex Holub and Pietro Perona. 2007. Caltech-256 object category dataset. (2007)."},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2909068"},{"key":"e_1_3_2_1_11_1","volume-title":"Tabor: A highly accurate approach to inspecting and restoring trojan backdoors in ai systems. arXiv:1908.01763","author":"Guo Wenbo","year":"2019","unstructured":"Wenbo Guo, Lun Wang, Xinyu Xing, Min Du, and Dawn Song. 2019. Tabor: A highly accurate approach to inspecting and restoring trojan backdoors in ai systems. arXiv:1908.01763 (2019)."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"e_1_3_2_1_13_1","volume-title":"Detection of Traffic Signs in Real-World Images: The German Traffic Sign Detection Benchmark. In International Joint Conference on Neural Networks.","author":"Houben Sebastian","year":"2013","unstructured":"Sebastian Houben, Johannes Stallkamp, Jan Salmen, Marc Schlipsing, and Christian Igel. 2013. Detection of Traffic Signs in Real-World Images: The German Traffic Sign Detection Benchmark. 
In International Joint Conference on Neural Networks."},{"key":"e_1_3_2_1_14_1","volume-title":"Proceedings of NAACL-HLT. 4171--4186","author":"Ming-Wei Chang Jacob Devlin","year":"2019","unstructured":"Jacob Devlin Ming-Wei Chang Kenton and Lee Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of NAACL-HLT. 4171--4186."},{"key":"e_1_3_2_1_15_1","volume-title":"Adam: A Method for Stochastic Optimization. In ICLR 2015, Conference Track Proceedings, Yoshua Bengio and Yann LeCun (Eds.).","author":"Diederik","unstructured":"Diederik P. Kingma and Jimmy Ba. 2015. Adam: A Method for Stochastic Optimization. In ICLR 2015, Conference Track Proceedings, Yoshua Bengio and Yann LeCun (Eds.)."},{"key":"e_1_3_2_1_16_1","unstructured":"Alex Krizhevsky Geoffrey Hinton et al. 2009. Learning multiple layers of features from tiny images. (2009)."},{"key":"e_1_3_2_1_17_1","volume-title":"Attribute and simile classifiers for face verification","author":"Kumar Neeraj","unstructured":"Neeraj Kumar, Alexander C Berg, Peter N Belhumeur, and Shree K Nayar. 2009. Attribute and simile classifiers for face verification. In ICCV. IEEE, 365--372."},{"key":"e_1_3_2_1_18_1","volume-title":"Tiny imagenet visual recognition challenge. CS 231N","author":"Le Ya","year":"2015","unstructured":"Ya Le and Xuan Yang. 2015. Tiny imagenet visual recognition challenge. CS 231N, Vol. 7, 7 (2015), 3."},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1109\/TDSC.2020.3021407"},{"key":"e_1_3_2_1_20_1","unstructured":"Yuezun Li Yiming Li Baoyuan Wu Longkang Li Ran He and Siwei Lyu. 2021a. Invisible Backdoor Attack with Sample-Specific Triggers. In ICCV."},{"key":"e_1_3_2_1_21_1","volume-title":"NeurIPS","volume":"34","author":"Li Yige","year":"2021","unstructured":"Yige Li, Xixiang Lyu, Nodens Koren, Lingjuan Lyu, Bo Li, and Xingjun Ma. 2021b. Anti-backdoor learning: Training clean models on poisoned data. NeurIPS, Vol. 
34 (2021)."},{"key":"e_1_3_2_1_22_1","volume-title":"Backdoor learning: A survey. arXiv:2007.08745","author":"Li Yiming","year":"2020","unstructured":"Yiming Li, Baoyuan Wu, Yong Jiang, Zhifeng Li, and Shu-Tao Xia. 2020a. Backdoor learning: A survey. arXiv:2007.08745 (2020)."},{"key":"e_1_3_2_1_23_1","volume-title":"Backdoor Attack in the Physical World. In ICLR Workshop.","author":"Li Yiming","year":"2021","unstructured":"Yiming Li, Tongqing Zhai, Yong Jiang, Zhifeng Li, and Shu-Tao Xia. 2021c. Backdoor Attack in the Physical World. In ICLR Workshop."},{"key":"e_1_3_2_1_24_1","volume-title":"Fine-pruning: Defending against backdooring attacks on deep neural networks","author":"Liu Kang","year":"2018","unstructured":"Kang Liu, Brendan Dolan-Gavitt, and Siddharth Garg. 2018a. Fine-pruning: Defending against backdooring attacks on deep neural networks. In RAID. Springer."},{"key":"e_1_3_2_1_25_1","unstructured":"Liyuan Liu Haoming Jiang Pengcheng He Weizhu Chen Xiaodong Liu Jianfeng Gao and Jiawei Han. 2019. On the Variance of the Adaptive Learning Rate and Beyond. In ICLR."},{"key":"e_1_3_2_1_26_1","unstructured":"Yingqi Liu Shiqing Ma Yousra Aafer Wen-Chuan Lee Juan Zhai Weihang Wang and Xiangyu Zhang. 2018b. Trojaning attack on neural networks. In NDSS."},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58607-2_11"},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.425"},{"key":"e_1_3_2_1_29_1","volume-title":"Sgdr: Stochastic gradient descent with warm restarts. In ICLR.","author":"Loshchilov Ilya","year":"2017","unstructured":"Ilya Loshchilov and Frank Hutter. 2017. Sgdr: Stochastic gradient descent with warm restarts. In ICLR."},{"key":"e_1_3_2_1_30_1","first-page":"3454","article-title":"Input-aware dynamic backdoor attack","volume":"33","author":"Nguyen Tuan Anh","year":"2020","unstructured":"Tuan Anh Nguyen and Anh Tran. 2020a. Input-aware dynamic backdoor attack. NeurIPS , Vol. 
33 (2020), 3454--3464.","journal-title":"NeurIPS"},{"key":"e_1_3_2_1_31_1","unstructured":"Tuan Anh Nguyen and Anh Tuan Tran. 2020b. WaNet-Imperceptible Warping-based Backdoor Attack. In ICLR."},{"key":"e_1_3_2_1_32_1","unstructured":"Xiangyu Qi Tinghao Xie Yiming Li Saeed Mahloujifar and Prateek Mittal. 2022a. Revisiting the assumption of latent separability for backdoor defenses. In ICLR."},{"key":"e_1_3_2_1_33_1","unstructured":"Xiangyu Qi Tinghao Xie Ruizhe Pan Jifeng Zhu Yong Yang and Kai Bu. 2022b. Towards practical deployment-stage backdoor attack on deep neural networks. In CVPR."},{"key":"e_1_3_2_1_34_1","unstructured":"Xiangyu Qi Tinghao Xie Jiachen T Wang Tong Wu Saeed Mahloujifar and Prateek Mittal. 2023. Towards A Proactive ML Approach for Detecting Backdoor Poison Samples. In USENIX Security 23. 1685--1702."},{"key":"e_1_3_2_1_35_1","volume-title":"Subnet replacement: Deployment-stage backdoor attack against deep neural networks in gray-box setting. arXiv:2107.07240","author":"Qi Xiangyu","year":"2021","unstructured":"Xiangyu Qi, Jifeng Zhu, Chulin Xie, and Yong Yang. 2021. Subnet replacement: Deployment-stage backdoor attack against deep neural networks in gray-box setting. arXiv:2107.07240 (2021)."},{"key":"e_1_3_2_1_36_1","volume-title":"NeurIPS","volume":"32","author":"Qin Chongli","year":"2019","unstructured":"Chongli Qin, James Martens, Sven Gowal, Dilip Krishnan, Krishnamurthy Dvijotham, Alhussein Fawzi, Soham De, Robert Stanforth, and Pushmeet Kohli. [n.,d.]. Adversarial robustness through local linearization. NeurIPS, 2019, Vol. 32 ( [n.,d.])."},{"key":"e_1_3_2_1_37_1","volume-title":"An overview of gradient descent optimization algorithms. arXiv:1609.04747","author":"Ruder Sebastian","year":"2016","unstructured":"Sebastian Ruder. 2016. An overview of gradient descent optimization algorithms. 
arXiv:1609.04747 (2016)."},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6871"},{"key":"e_1_3_2_1_39_1","volume-title":"FaceHack: Triggering backdoored facial recognition systems using facial characteristics. arXiv:2006.11623","author":"Sarkar Esha","year":"2020","unstructured":"Esha Sarkar, Hadjer Benkraouda, and Michail Maniatakos. 2020. FaceHack: Triggering backdoored facial recognition systems using facial characteristics. arXiv:2006.11623 (2020)."},{"key":"e_1_3_2_1_40_1","volume-title":"Membership inference attacks against machine learning models. In 2017 IEEE S&P","author":"Shokri Reza","unstructured":"Reza Shokri, Marco Stronati, Congzheng Song, and Vitaly Shmatikov. 2017. Membership inference attacks against machine learning models. In 2017 IEEE S&P. IEEE, 3--18."},{"key":"e_1_3_2_1_41_1","volume-title":"Sleeper agent: Scalable hidden trigger backdoors for neural networks trained from scratch. arXiv:2106.08970","author":"Souri Hossein","year":"2021","unstructured":"Hossein Souri, Micah Goldblum, Liam Fowl, Rama Chellappa, and Tom Goldstein. 2021. Sleeper agent: Scalable hidden trigger backdoors for neural networks trained from scratch. arXiv:2106.08970 (2021)."},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"e_1_3_2_1_43_1","volume-title":"Efficientnet: Rethinking model scaling for convolutional neural networks. In ICML. PMLR, 6105--6114.","author":"Tan Mingxing","year":"2019","unstructured":"Mingxing Tan and Quoc Le. 2019. Efficientnet: Rethinking model scaling for convolutional neural networks. In ICML. PMLR, 6105--6114."},{"key":"e_1_3_2_1_44_1","doi-asserted-by":"publisher","DOI":"10.1109\/SP46214.2022.9833688"},{"key":"e_1_3_2_1_45_1","volume-title":"Backdoor vulnerabilities in normally trained deep learning models. 
arXiv preprint arXiv:2211.15929","author":"Tao Guanhong","year":"2022","unstructured":"Guanhong Tao, Zhenting Wang, Siyuan Cheng, Shiqing Ma, Shengwei An, Yingqi Liu, Guangyu Shen, Zhuo Zhang, Yunshu Mao, and Xiangyu Zhang. 2022b. Backdoor vulnerabilities in normally trained deep learning models. arXiv preprint arXiv:2211.15929 (2022)."},{"key":"e_1_3_2_1_46_1","volume-title":"Label-consistent backdoor attacks. arXiv preprint arXiv:1912.02771","author":"Turner Alexander","year":"2019","unstructured":"Alexander Turner, Dimitris Tsipras, and Aleksander Madry. 2019. Label-consistent backdoor attacks. arXiv preprint arXiv:1912.02771 (2019)."},{"key":"e_1_3_2_1_47_1","volume-title":"Neural cleanse: Identifying and mitigating backdoor attacks in neural networks. In 2019 IEEE S&P","author":"Wang Bolun","unstructured":"Bolun Wang, Yuanshun Yao, Shawn Shan, Huiying Li, Bimal Viswanath, Haitao Zheng, and Ben Y Zhao. 2019. Neural cleanse: Identifying and mitigating backdoor attacks in neural networks. In 2019 IEEE S&P. IEEE, 707--723."},{"key":"e_1_3_2_1_48_1","first-page":"36396","article-title":"Training with more confidence: Mitigating injected and natural backdoors during training","volume":"35","author":"Wang Zhenting","year":"2022","unstructured":"Zhenting Wang, Hailun Ding, Juan Zhai, and Shiqing Ma. 2022a. Training with more confidence: Mitigating injected and natural backdoors during training. NeurIPS, Vol. 35 (2022), 36396--36410.","journal-title":"NeurIPS"},{"key":"e_1_3_2_1_49_1","volume-title":"NeurIPS","volume":"35","author":"Wang Zhenting","year":"2022","unstructured":"Zhenting Wang, Kai Mei, Hailun Ding, Juan Zhai, and Shiqing Ma. 2022b. Rethinking the Reverse-engineering of Trojan Triggers. NeurIPS, Vol. 35 (2022)."},{"key":"e_1_3_2_1_50_1","volume-title":"UNICORN: A Unified Backdoor Trigger Inversion Framework. In ICLR.","author":"Wang Zhenting","year":"2022","unstructured":"Zhenting Wang, Kai Mei, Juan Zhai, and Shiqing Ma. 2022c. 
UNICORN: A Unified Backdoor Trigger Inversion Framework. In ICLR."},{"key":"e_1_3_2_1_51_1","volume-title":"Bppattack: Stealthy and efficient trojan attacks against deep neural networks via image quantization and contrastive adversarial learning. In CVPR.","author":"Wang Zhenting","year":"2022","unstructured":"Zhenting Wang, Juan Zhai, and Shiqing Ma. 2022d. Bppattack: Stealthy and efficient trojan attacks against deep neural networks via image quantization and contrastive adversarial learning. In CVPR."},{"key":"e_1_3_2_1_52_1","volume-title":"BaDExpert: Extracting Backdoor Functionality for Accurate Backdoor Input Detection. arXiv:2308.12439","author":"Xie Tinghao","year":"2023","unstructured":"Tinghao Xie, Xiangyu Qi, Ping He, Yiming Li, Jiachen T Wang, and Prateek Mittal. 2023. BaDExpert: Extracting Backdoor Functionality for Accurate Backdoor Input Detection. arXiv:2308.12439 (2023)."},{"key":"e_1_3_2_1_53_1","doi-asserted-by":"publisher","DOI":"10.5244\/C.30.87"},{"key":"e_1_3_2_1_54_1","unstructured":"Yi Zeng Si Chen Won Park Zhuoqing Mao Ming Jin and Ruoxi Jia. 2022. Adversarial Unlearning of Backdoors via Implicit Hypergradient. In ICLR."},{"key":"e_1_3_2_1_55_1","unstructured":"Yi Zeng Minzhou Pan Himanshu Jahagirdar Ming Jin Lingjuan Lyu and Ruoxi Jia. 2023. Meta-Sift: How to Sift Out a Clean Subset in the Presence of Data Poisoning?. In USENIX Security 23. 1667--1684."},{"key":"e_1_3_2_1_56_1","doi-asserted-by":"crossref","unstructured":"Yi Zeng Won Park Z Morley Mao and Ruoxi Jia. 2021. Rethinking the Backdoor Attacks' Triggers: A Frequency Perspective. In ICCV.","DOI":"10.1109\/ICCV48922.2021.01616"},{"key":"e_1_3_2_1_57_1","doi-asserted-by":"crossref","unstructured":"Chaoning Zhang Philipp Benz Tooba Imtiaz and In So Kweon. 2020. Understanding adversarial examples from the mutual influence of images and perturbations. In CVPR. 
14521--14530.","DOI":"10.1109\/CVPR42600.2020.01453"},{"key":"e_1_3_2_1_58_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00068"},{"key":"e_1_3_2_1_59_1","volume-title":"Clean-Label Backdoor Attacks on Video Recognition Models. In 2020 CVPR","author":"Zhao Shihao","unstructured":"Shihao Zhao, Xingjun Ma, Xiang Zheng, James Bailey, Jingjing Chen, and Yu-Gang Jiang. 2020. Clean-Label Backdoor Attacks on Video Recognition Models. In 2020 CVPR. IEEE Computer Society, 14431--14440."}],"event":{"name":"CCS '23: ACM SIGSAC Conference on Computer and Communications Security","location":"Copenhagen Denmark","acronym":"CCS '23","sponsor":["SIGSAC ACM Special Interest Group on Security, Audit, and Control"]},"container-title":["Proceedings of the 2023 ACM SIGSAC Conference on Computer and Communications Security"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3576915.3616617","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3576915.3616617","content-type":"application\/pdf","content-version":"vor","intended-application":"syndication"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3576915.3616617","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,21]],"date-time":"2025-08-21T01:42:35Z","timestamp":1755740555000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3576915.3616617"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,11,15]]},"references-count":59,"alternative-id":["10.1145\/3576915.3616617","10.1145\/3576915"],"URL":"https:\/\/doi.org\/10.1145\/3576915.3616617","relation":{},"subject":[],"published":{"date-parts":[[2023,11,15]]},"assertion":[{"value":"2023-11-21","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}