{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T17:03:08Z","timestamp":1777654988415,"version":"3.51.4"},"publisher-location":"New York, NY, USA","reference-count":52,"publisher":"ACM","license":[{"start":{"date-parts":[[2020,12,7]],"date-time":"2020-12-07T00:00:00Z","timestamp":1607299200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2020,12,7]]},"DOI":"10.1145\/3427228.3427264","type":"proceedings-article","created":{"date-parts":[[2020,12,9]],"date-time":"2020-12-09T22:20:18Z","timestamp":1607552418000},"page":"897-912","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":173,"title":["Februus: Input Purification Defense Against Trojan Attacks on Deep Neural Network Systems"],"prefix":"10.1145","author":[{"given":"Bao Gia","family":"Doan","sequence":"first","affiliation":[{"name":"The University of Adelaide, Australia"}]},{"given":"Ehsan","family":"Abbasnejad","sequence":"additional","affiliation":[{"name":"The University of Adelaide"}]},{"given":"Damith C.","family":"Ranasinghe","sequence":"additional","affiliation":[{"name":"The University of Adelaide, Australia"}]}],"member":"320","published-online":{"date-parts":[[2020,12,8]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"[n.d.]. Amazon Machine Learning. https:\/\/aws.amazon.com\/machine-learning  [n.d.]. Amazon Machine Learning. https:\/\/aws.amazon.com\/machine-learning"},{"key":"e_1_3_2_1_2_1","unstructured":"[n.d.]. Gradientzoo: pre-trained neural network models. https:\/\/www.gradientzoo.com\/  [n.d.]. Gradientzoo: pre-trained neural network models. https:\/\/www.gradientzoo.com\/"},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1007\/s10916-018-1088-1"},{"key":"e_1_3_2_1_4_1","unstructured":"ARO. [n.d.]. BROAD AGENCY ANNOUNCEMENT FOR TrojAI. https:\/\/www.arl.army.mil\/www\/pages\/8\/TrojAI-V3.2.pdf  ARO. [n.d.]. BROAD AGENCY ANNOUNCEMENT FOR TrojAI. https:\/\/www.arl.army.mil\/www\/pages\/8\/TrojAI-V3.2.pdf"},{"key":"e_1_3_2_1_5_1","unstructured":"Eugene Bagdasaryan and Vitaly Shmatikov. 2020. Blind Backdoors in Deep Learning Models. arxiv:2005.03823\u00a0[cs.CR]  Eugene Bagdasaryan and Vitaly Shmatikov. 2020. Blind Backdoors in Deep Learning Models. arxiv:2005.03823\u00a0[cs.CR]"},{"key":"e_1_3_2_1_6_1","volume-title":"International Conference on Artificial Intelligence and Statistics (AISTATS).","author":"Bagdasaryan Eugene","year":"2020"},{"key":"e_1_3_2_1_7_1","unstructured":"Bvlc. [n.d.]. Caffe Model Zoo. https:\/\/github.com\/BVLC\/caffe\/wiki\/Model-Zoo  Bvlc. [n.d.]. Caffe Model Zoo. https:\/\/github.com\/BVLC\/caffe\/wiki\/Model-Zoo"},{"key":"e_1_3_2_1_8_1","volume-title":"IEEE International Conference on Automatic Face and Gesture Recognition (FG).","author":"Cao Q."},{"key":"e_1_3_2_1_9_1","volume-title":"Artificial Intelligence Safety Workshop at Association for the Advancement of Artificial Intelligence (AAAI).","author":"Chen Bryant","year":"2019"},{"key":"e_1_3_2_1_10_1","volume-title":"DeepDriving: Learning Affordance for Direct Perception in Autonomous Driving. In IEEE International Conference on Computer Vision (ICCV).","author":"Chen C."},{"key":"e_1_3_2_1_11_1","volume-title":"DeepInspect: A Black-box Trojan Detection and Mitigation Framework for Deep Neural Networks. In International Joint Conference on Artificial Intelligence (IJCAI).","author":"Chen Huili","year":"2019"},{"key":"e_1_3_2_1_12_1","unstructured":"Xinyun Chen Chang Liu Bo Li Kimberly Lu and Dawn Song. 2017. Targeted Backdoor Attacks on Deep Learning Systems Using Data Poisoning. arxiv:1712.05526\u00a0[cs.CR]  Xinyun Chen Chang Liu Bo Li Kimberly Lu and Dawn Song. 2017. Targeted Backdoor Attacks on Deep Learning Systems Using Data Poisoning. arxiv:1712.05526\u00a0[cs.CR]"},{"key":"e_1_3_2_1_13_1","volume-title":"SentiNet: Detecting Physical Attacks Against Deep Learning Systems. In Deep Learning and Security Workshop at IEEE Security and Privacy (S&P).","author":"Chou Edward","year":"2020"},{"key":"e_1_3_2_1_14_1","unstructured":"Yansong Gao Yeonjae Kim Bao\u00a0Gia Doan Zhi Zhang Gongxuan Zhang Surya Nepal Damith\u00a0C. Ranasinghe and Hyoungshick Kim. 2019. Design and Evaluation of a Multi-Domain Trojan Detection Method on Deep Neural Networks. arxiv:1911.10312\u00a0[cs.CR]  Yansong Gao Yeonjae Kim Bao\u00a0Gia Doan Zhi Zhang Gongxuan Zhang Surya Nepal Damith\u00a0C. Ranasinghe and Hyoungshick Kim. 2019. Design and Evaluation of a Multi-Domain Trojan Detection Method on Deep Neural Networks. arxiv:1911.10312\u00a0[cs.CR]"},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1145\/3359789.3359790"},{"key":"e_1_3_2_1_16_1","unstructured":"Ian Goodfellow Jean Pouget-Abadie Mehdi Mirza Bing Xu David Warde-Farley Sherjil Ozair Aaron Courville and Yoshua Bengio. 2014. Generative Adversarial Nets. In Advances in Neural Information Processing Systems (NeurIPS).  Ian Goodfellow Jean Pouget-Abadie Mehdi Mirza Bing Xu David Warde-Farley Sherjil Ozair Aaron Courville and Yoshua Bengio. 2014. Generative Adversarial Nets. In Advances in Neural Information Processing Systems (NeurIPS)."},{"key":"e_1_3_2_1_17_1","volume-title":"Explaining and Harnessing Adversarial Examples. In International Conference on Learning Representations (ICLR).","author":"Goodfellow Ian","year":"2015"},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2909068"},{"key":"e_1_3_2_1_19_1","unstructured":"Ishaan Gulrajani Faruk Ahmed Martin Arjovsky Vincent Dumoulin and Aaron\u00a0C Courville. 2017. Improved Training of Wasserstein GANs. In Advances in Neural Information Processing Systems (NeurIPS).  Ishaan Gulrajani Faruk Ahmed Martin Arjovsky Vincent Dumoulin and Aaron\u00a0C Courville. 2017. Improved Training of Wasserstein GANs. In Advances in Neural Information Processing Systems (NeurIPS)."},{"key":"e_1_3_2_1_20_1","volume-title":"TABOR: A Highly Accurate Approach to Inspecting and Restoring Trojan Backdoors in AI Systems. arxiv:1908.01763\u00a0[cs.CR]","author":"Guo Wenbo","year":"2019"},{"key":"e_1_3_2_1_21_1","volume-title":"Deep Residual Learning for Image Recognition. IEEE Conference on Computer Vision and Pattern Recognition (CVPR).","author":"He Kaiming","year":"2016"},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1145\/3072959.3073659"},{"key":"e_1_3_2_1_23_1","unstructured":"Jing\u00a0Yu Koh. [n.d.]. Model Zoo. https:\/\/modelzoo.co\/  Jing\u00a0Yu Koh. [n.d.]. Model Zoo. https:\/\/modelzoo.co\/"},{"key":"e_1_3_2_1_24_1","unstructured":"Alex Krizhevsky Geoffrey Hinton 2009. Learning multiple layers of features from tiny images. (2009).  Alex Krizhevsky Geoffrey Hinton 2009. Learning multiple layers of features from tiny images. (2009)."},{"key":"e_1_3_2_1_25_1","volume-title":"MNIST handwritten digit database. ATT Labs","author":"LeCun Yann","year":"2010"},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2018.2801560"},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.1109\/TDSC.2020.3021407"},{"key":"e_1_3_2_1_28_1","volume-title":"The architectural implications of autonomous driving: Constraints and acceleration","author":"Lin Shih-Chieh"},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-00470-5_13"},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.1145\/3319535.3363216"},{"key":"e_1_3_2_1_31_1","volume-title":"Trojaning Attack on Neural Networks. In Network and Distributed System Security Symposium (NDSS).","author":"Liu Yingqi","year":"2018"},{"key":"e_1_3_2_1_32_1","volume-title":"Reflection Backdoor: A Natural Backdoor Attack on Deep Neural Networks. In European Conference on Computer Vision (ECCV).","author":"Liu Yunfei","year":"2020"},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCD.2017.16"},{"key":"e_1_3_2_1_34_1","volume-title":"International Conference on Learning Representations (ICLR).","author":"Madry Aleksander","year":"2018"},{"key":"e_1_3_2_1_35_1","volume-title":"International Joint Conference on Neural Networks (IJCNN).","author":"Mathias M."},{"key":"e_1_3_2_1_36_1","volume-title":"Deep Face Recognition. In British Machine Vision Conference (BMVC).","author":"Parkhi M."},{"key":"e_1_3_2_1_37_1","volume-title":"Hidden Trigger Backdoor Attacks","author":"Saha Aniruddha"},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2016.2599820"},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.74"},{"key":"e_1_3_2_1_40_1","volume-title":"Very Deep Convolutional Networks for Large-Scale Image Recognition. In International Conference on Learning Representations (ICLR).","author":"Simonyan Karen","year":"2015"},{"key":"e_1_3_2_1_41_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2012.02.016"},{"key":"e_1_3_2_1_42_1","volume-title":"International Conference on Learning Representations (ICLR).","author":"Szegedy Christian","year":"2014"},{"key":"e_1_3_2_1_43_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.220"},{"key":"e_1_3_2_1_44_1","first-page":"2579","article-title":"Visualizing Data using t-SNE","author":"van\u00a0der Maaten Laurens","year":"2008","journal-title":"Journal of Machine Learning Research 9"},{"key":"e_1_3_2_1_45_1","volume-title":"Jinyuan jia, and Neil\u00a0Zhenqiang Gong","author":"Wang Binghui","year":"2020"},{"key":"e_1_3_2_1_46_1","volume-title":"Neural Cleanse: Identifying and Mitigating Backdoor Attacks in Neural Networks. In IEEE Symposium on Security and Privacy (S&P).","author":"Wang Bolun","year":"2019"},{"key":"e_1_3_2_1_47_1","doi-asserted-by":"publisher","DOI":"10.1145\/3097983.3098158"},{"key":"e_1_3_2_1_48_1","volume-title":"RAB: Provable Robustness Against Backdoor Attacks. arxiv:2003.08904\u00a0[cs.LG]","author":"Weber Maurice","year":"2020"},{"key":"e_1_3_2_1_49_1","unstructured":"C. Wierzynski. 2018. The Challenges and Opportunities of Explainable AI. https:\/\/www.intel.ai\/the-challenges-and-opportunities-of-explainable-ai  C. Wierzynski. 2018. The Challenges and Opportunities of Explainable AI. https:\/\/www.intel.ai\/the-challenges-and-opportunities-of-explainable-ai"},{"key":"e_1_3_2_1_50_1","doi-asserted-by":"publisher","DOI":"10.1145\/2619239.2631434"},{"key":"e_1_3_2_1_51_1","volume-title":"USENIX Security Symposium.","author":"Zhang Xinyang","year":"2020"},{"key":"e_1_3_2_1_52_1","unstructured":"Zaixi Zhang Jinyuan Jia Binghui Wang and Neil\u00a0Zhenqiang Gong. 2020. Backdoor Attacks to Graph Neural Networks. arxiv:2006.11165\u00a0[cs.CR]  Zaixi Zhang Jinyuan Jia Binghui Wang and Neil\u00a0Zhenqiang Gong. 2020. Backdoor Attacks to Graph Neural Networks. arxiv:2006.11165\u00a0[cs.CR]"}],"event":{"name":"ACSAC '20: Annual Computer Security Applications Conference","location":"Austin USA","acronym":"ACSAC '20"},"container-title":["Annual Computer Security Applications Conference"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3427228.3427264","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3427228.3427264","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T22:02:24Z","timestamp":1750197744000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3427228.3427264"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,12,7]]},"references-count":52,"alternative-id":["10.1145\/3427228.3427264","10.1145\/3427228"],"URL":"https:\/\/doi.org\/10.1145\/3427228.3427264","relation":{},"subject":[],"published":{"date-parts":[[2020,12,7]]},"assertion":[{"value":"2020-12-08","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}