{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,6]],"date-time":"2026-03-06T19:02:14Z","timestamp":1772823734316,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":69,"publisher":"ACM","license":[{"start":{"date-parts":[[2021,12,6]],"date-time":"2021-12-06T00:00:00Z","timestamp":1638748800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2021,12,6]]},"DOI":"10.1145\/3485832.3485904","type":"proceedings-article","created":{"date-parts":[[2021,12,6]],"date-time":"2021-12-06T13:42:32Z","timestamp":1638798152000},"page":"31-44","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":13,"title":["Two Souls in an Adversarial Image: Towards Universal Adversarial Example Detection using Multi-view Inconsistency"],"prefix":"10.1145","author":[{"given":"Sohaib","family":"Kiani","sequence":"first","affiliation":[{"name":"University of Kansas"}]},{"given":"Sana","family":"Awan","sequence":"additional","affiliation":[{"name":"University of Kansas"}]},{"given":"Chao","family":"Lan","sequence":"additional","affiliation":[{"name":"University of Oklahoma"}]},{"given":"Fengjun","family":"Li","sequence":"additional","affiliation":[{"name":"University of Kansas, United States of America"}]},{"given":"Bo","family":"Luo","sequence":"additional","affiliation":[{"name":"University of Kansas, United States of America"}]}],"member":"320","published-online":{"date-parts":[[2021,12,6]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"International Conference on Machine Learning (ICML).","author":"Athalye Anish","year":"2018","unstructured":"Anish Athalye, Nicholas Carlini, and David Wagner. 2018. Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples. In International Conference on Machine Learning (ICML)."},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-88418-5_22"},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"crossref","unstructured":"Battista Biggio Igino Corona Davide Maiorca Blaine Nelson Nedim \u0160rndi\u0107 Pavel Laskov Giorgio Giacinto and Fabio Roli. 2013. Evasion attacks against machine learning at test time. In Machine Learning and Knowledge Discovery in Databases.","DOI":"10.1007\/978-3-642-40994-3_25"},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.1145\/3134600.3134606"},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.1145\/3128572.3140444"},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.49"},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01446"},{"key":"e_1_3_2_1_8_1","volume-title":"International Conference on Machine Learning (ICML).","author":"Cohen Jeremy","year":"2019","unstructured":"Jeremy Cohen, Elan Rosenfeld, and Zico Kolter. 2019. Certified adversarial robustness via randomized smoothing. In International Conference on Machine Learning (ICML)."},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.1145\/3427228.3427264"},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00957"},{"key":"e_1_3_2_1_11_1","unstructured":"Gamaleldin Elsayed Shreya Shankar Brian Cheung Nicolas Papernot Alexey Kurakin Ian Goodfellow and Jascha Sohl-Dickstein. 2018. Adversarial examples that fool both computer vision and time-limited humans. In Advances in Neural Information Processing Systems (NeurIPS)."},{"key":"e_1_3_2_1_12_1","unstructured":"Reuben Feinman Ryan\u00a0R Curtin Saurabh Shintre and Andrew\u00a0B Gardner. 2017. Detecting adversarial samples from artifacts. arxiv:1703.00410"},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1145\/2810103.2813677"},{"key":"e_1_3_2_1_14_1","unstructured":"Zhitao Gong Wenlu Wang and Wei-Shinn Ku. 2017. Adversarial and Clean Data Are Not Twins. arXiv:1704.04960"},{"key":"e_1_3_2_1_15_1","volume-title":"Explaining and Harnessing Adversarial Examples. In International Conference on Learning Representations (ICLR).","author":"Goodfellow J.","year":"2015","unstructured":"Ian\u00a0J. Goodfellow, Jonathon Shlens, and Christian Szegedy. 2015. Explaining and Harnessing Adversarial Examples. In International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"crossref","unstructured":"Sorin Grigorescu Bogdan Trasnea Tiberiu Cocias and Gigel Macesanu. 2020. A survey of deep learning techniques for autonomous driving. Journal of Field Robotics(2020).","DOI":"10.1002\/rob.21918"},{"key":"e_1_3_2_1_17_1","unstructured":"Kathrin Grosse Praveen Manoharan Nicolas Papernot Michael Backes and Patrick McDaniel. 2017. On the (Statistical) Detection of Adversarial Examples. arXiv:1702.06280"},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1109\/ISPACS.2017.8266448"},{"key":"e_1_3_2_1_19_1","volume-title":"Early Methods for Detecting Adversarial Images. In International Conference on Learning Representations (ICLR).","author":"Hendrycks Dan","year":"2017","unstructured":"Dan Hendrycks and Kevin Gimpel. 2017. Early Methods for Detecting Adversarial Images. In International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_20_1","unstructured":"Hossein Hosseini S. Kannan and R. Poovendran. 2019. Are Odds Really Odd? Bypassing Statistical Detection of Adversarial Examples. arXiv:1907.12138"},{"key":"e_1_3_2_1_21_1","unstructured":"A. Ilyas S. Santurkar L. Engstrom B. Tran and A. Madry. 2019. Adversarial Examples Are Not Bugs They Are Features. In Advances in Neural Information Processing Systems (NeurIPS)."},{"key":"e_1_3_2_1_22_1","volume-title":"International Encyclopedia of Statistical Science","author":"Joyce M.","year":"2011","unstructured":"James\u00a0M. Joyce. 2011. Kullback-Leibler Divergence. International Encyclopedia of Statistical Science (2011)."},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1145\/3384217.3385625"},{"key":"e_1_3_2_1_24_1","unstructured":"Alex Krizhevsky. 2009. Learning multiple layers of features from tiny images."},{"key":"e_1_3_2_1_25_1","unstructured":"Kimin Lee Kibok Lee Honglak Lee and Jinwoo Shin. 2018. A Simple Unified Framework for Detecting Out-of-Distribution Samples and Adversarial Attacks. In Advances in Neural Information Processing Systems (NeurIPS)."},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298958"},{"key":"e_1_3_2_1_27_1","volume-title":"Isolation Forest. In IEEE International Conference on Data Mining (ICDM).","author":"Liu Fei\u00a0Tony","year":"2008","unstructured":"Fei\u00a0Tony Liu, Kai\u00a0Ming Ting, and Zhi-Hua Zhou. 2008. Isolation Forest. In IEEE International Conference on Data Mining (ICDM)."},{"key":"e_1_3_2_1_28_1","volume-title":"Trojaning Attack on Neural Networks. In Network and Distributed System Security Symposium (NDSS).","author":"Liu Yingqi","unstructured":"Yingqi Liu, Shiqing Ma, Yousra Aafer, W. Lee, Juan Zhai, Weihang Wang, and X. Zhang. 2018. Trojaning Attack on Neural Networks. In Network and Distributed System Security Symposium (NDSS)."},{"key":"e_1_3_2_1_29_1","volume-title":"International Conference on Learning Representations (ICLR).","author":"Madry Aleksander","year":"2018","unstructured":"Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. 2018. Towards Deep Learning Models Resistant to Adversarial Attacks. In International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_30_1","volume-title":"International Conference of Learning Representation (ICLR).","author":"Metzen Jan\u00a0Hendrik","year":"2017","unstructured":"Jan\u00a0Hendrik Metzen, Tim Genewein, Volker Fischer, and Bastian Bischoff. 2017. On detecting adversarial perturbations. International Conference of Learning Representation (ICLR)."},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.282"},{"key":"e_1_3_2_1_32_1","volume-title":"Deep Learning for Safe Autonomous Driving: Current Challenges and Future Directions","author":"Muhammad Khan","year":"2020","unstructured":"Khan Muhammad, Amin Ullah, Jaime Lloret, Javier Del\u00a0Ser, and Victor Hugo\u00a0C de Albuquerque. 2020. Deep Learning for Safe Autonomous Driving: Current Challenges and Future Directions. IEEE Transactions on Intelligent Transportation Systems (2020)."},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298640"},{"key":"e_1_3_2_1_34_1","unstructured":"A\u00e4ron van\u00a0den Oord Nal Kalchbrenner Oriol Vinyals Lasse Espeholt Alex Graves and Koray Kavukcuoglu. 2016. Conditional Image Generation with PixelCNN Decoders. In Advances in Neural Information Processing Systems (NeurIPS)."},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1145\/3372297.3417253"},{"key":"e_1_3_2_1_36_1","unstructured":"Nicolas Papernot Fartash Faghri Nicholas Carlini Ian Goodfellow Reuben Feinman Alexey Kurakin Cihang Xie Yash Sharma Tom Brown Aurko Roy Alexander Matyasko Vahid Behzadan Karen Hambardzumyan Zhishuai Zhang Yi-Lin Juang Zhi Li Ryan Sheatsley Abhibhav Garg Jonathan Uesato Willi Gierke Yinpeng Dong David Berthelot Paul Hendricks Jonas Rauber and Rujun Long. 2018. Technical Report on the CleverHans v2.1.0 Adversarial Examples Library. arXiv preprint arXiv:1610.00768."},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1109\/EuroSP.2016.36"},{"key":"e_1_3_2_1_38_1","volume-title":"Detecting and Diagnosing Adversarial Images with Class-Conditional Capsule Reconstructions. In International Conference on Learning Representations (ICLR).","author":"Qin Yao","year":"2020","unstructured":"Yao Qin, Nicholas Frosst, Sara Sabour, Colin Raffel, Garrison Cottrell, and Geoffrey Hinton. 2020. Detecting and Diagnosing Adversarial Images with Class-Conditional Capsule Reconstructions. In International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_39_1","volume-title":"International Conference on Learning Representations (ICLR).","author":"Ramachandran Prajit","year":"2017","unstructured":"Prajit Ramachandran, Tom\u00a0Le Paine, Pooya Khorrami, Mohammad Babaeizadeh, Shiyu Chang, Yang Zhang, Mark\u00a0A Hasegawa-Johnson, Roy\u00a0H Campbell, and Thomas\u00a0S Huang. 2017. Fast generation for convolutional autoregressive models. International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_40_1","volume-title":"Deep convolutional neural networks for image classification: A comprehensive review. Neural computation","author":"Rawat Waseem","year":"2017","unstructured":"Waseem Rawat and Zenghui Wang. 2017. Deep convolutional neural networks for image classification: A comprehensive review. Neural computation (2017)."},{"key":"e_1_3_2_1_41_1","volume-title":"International Conference on Learning Representations (ICLR).","author":"Rezaei Shahbaz","year":"2019","unstructured":"Shahbaz Rezaei and Xin Liu. 2019. A Target-Agnostic Attack on Deep Models: Exploiting Security Vulnerabilities of Transfer Learning. In International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_42_1","volume-title":"Proceedings of International Conference on Machine Learning (PMLR).","author":"Roth Kevin","year":"2019","unstructured":"Kevin Roth, Yannic Kilcher, and Thomas Hofmann. 2019. The Odds are Odd: A Statistical Test for Detecting Adversarial Examples. In Proceedings of International Conference on Machine Learning (PMLR)."},{"key":"e_1_3_2_1_43_1","doi-asserted-by":"crossref","unstructured":"Olga Russakovsky Jia Deng Hao Su Jonathan Krause Sanjeev Satheesh Sean Ma Zhiheng Huang Andrej Karpathy Aditya Khosla Michael Bernstein A.\u00a0C. Berg and L. Fei-Fei. 2015. ImageNet Large Scale Visual Recognition Challenge. Int. journal Computer Vision (IJCV)(2015).","DOI":"10.1007\/s11263-015-0816-y"},{"key":"e_1_3_2_1_44_1","volume-title":"International Conference on Learning Representations (ICLR).","author":"Salimans Tim","year":"2017","unstructured":"Tim Salimans, Andrej Karpathy, Xi Chen, and Diederik\u00a0P Kingma. 2017. Pixelcnn++: Improving the pixelcnn with discretized logistic mixture likelihood and other modifications. In International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_45_1","volume-title":"Defense-GAN: Protecting Classifiers Against Adversarial Attacks Using Generative Models. International Conference of Learning Representation (ICLR).","author":"Samangouei Pouya","year":"2018","unstructured":"Pouya Samangouei, Maya Kabkab, and Rama Chellappa. 2018. Defense-GAN: Protecting Classifiers Against Adversarial Attacks Using Generative Models. International Conference of Learning Representation (ICLR)."},{"key":"e_1_3_2_1_46_1","unstructured":"Ali Shafahi W\u00a0Ronny Huang Mahyar Najibi Octavian Suciu Christoph Studer Tudor Dumitras and Tom Goldstein. 2018. Poison frogs! targeted clean-label poisoning attacks on neural networks. In Advances in Neural Information Processing Systems (NeurIPS)."},{"key":"e_1_3_2_1_47_1","volume-title":"Mastering the game of Go with deep neural networks and tree search. Nature","author":"Silver D.","year":"2016","unstructured":"D. Silver, A. Huang, C. Maddison, A. Guez, L. Sifre, G. Van Den\u00a0Driessche, J. Schrittwieser, I. Antonoglou, 2016. Mastering the game of Go with deep neural networks and tree search. Nature (2016)."},{"key":"e_1_3_2_1_48_1","volume-title":"International Conference on Learning Representations (ICLR).","author":"Song Yang","year":"2018","unstructured":"Yang Song, Taesup Kim, Sebastian Nowozin, Stefano Ermon, and Nate Kushman. 2018. PixelDefend: Leveraging Generative Models to Understand and Defend against Adversarial Examples. In International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_49_1","doi-asserted-by":"crossref","unstructured":"J. Stallkamp M. Schlipsing J. Salmen and C. Igel. 2012. Man vs. computer: Benchmarking machine learning algorithms for traffic sign recognition. Neural Networks (2012).","DOI":"10.1016\/j.neunet.2012.02.016"},{"key":"e_1_3_2_1_50_1","volume-title":"Random Forests. Machine Learning","author":"Statistics Leo\u00a0Breiman","year":"2001","unstructured":"Leo\u00a0Breiman Statistics and Leo Breiman. 2001. Random Forests. Machine Learning (2001)."},{"key":"e_1_3_2_1_51_1","unstructured":"Jacob Steinhardt Pang\u00a0Wei Koh and Percy Liang. 2017. Certified defenses for data poisoning attacks. In Advances in Neural Information Processing Systems (NeurIPS)."},{"key":"e_1_3_2_1_52_1","unstructured":"Yi Sun Ding Liang Xiaogang Wang and Xiaoou Tang. 2015. Deepid3: Face recognition with very deep neural networks. CoRR abs\/1502.00873. arXiv:1502.00873http:\/\/arxiv.org\/abs\/1502.00873"},{"key":"e_1_3_2_1_53_1","unstructured":"Christian Szegedy Wojciech Zaremba Ilya Sutskever Joan Bruna Dumitru Erhan Ian Goodfellow and Rob Fergus. 2013. Intriguing properties of neural networks. arXiv:1312.6199."},{"key":"e_1_3_2_1_54_1","volume-title":"30th USENIX Security Symposium (USENIX Security 21)","author":"Tang Di","year":"2021","unstructured":"Di Tang, XiaoFeng Wang, Haixu Tang, and Kehuan Zhang. 2021. Demon in the variant: Statistical analysis of dnns for robust backdoor contamination detection. 30th USENIX Security Symposium (USENIX Security 21)."},{"key":"e_1_3_2_1_55_1","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3403064"},{"key":"e_1_3_2_1_56_1","unstructured":"Florian Tram\u00e8r Nicholas Carlini Wieland Brendel and Aleksander Madry. 2020. On Adaptive Attacks to Adversarial Example Defenses. In Advances in Neural Information Processing Systems (NeurIPS)."},{"key":"e_1_3_2_1_57_1","volume-title":"International Conference on Learning Representations (ICLR).","author":"Tsipras Dimitris","year":"2018","unstructured":"Dimitris Tsipras, Shibani Santurkar, Logan Engstrom, Alexander Turner, and Aleksander Madry. 2018. Robustness May Be at Odds with Accuracy. In International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_58_1","unstructured":"Benigno Uria Marc-Alexandre C\u00f4t\u00e9 Karol Gregor Iain Murray and Hugo Larochelle. 2016. Neural Autoregressive Distribution Estimation. Journal of Machine Learning Research(2016)."},{"key":"e_1_3_2_1_59_1","unstructured":"Giovanni Vacanti and Arnaud\u00a0Van Looveren. 2020. Adversarial Detection and Correction by Matching Prediction Distributions. CoRR abs\/2002.09364. arXiv:2002.09364https:\/\/arxiv.org\/abs\/2002.09364"},{"key":"e_1_3_2_1_60_1","volume-title":"International Conference on Machine Learning (ICML).","author":"Van\u00a0Oord Aaron","year":"2016","unstructured":"Aaron Van\u00a0Oord, Nal Kalchbrenner, and Koray Kavukcuoglu. 2016. Pixel recurrent neural networks. In International Conference on Machine Learning (ICML)."},{"key":"e_1_3_2_1_61_1","volume-title":"Towards Understanding and Improving the Transferability of Adversarial Examples in Deep Neural Networks. In Asian Conference on Machine Learning.","author":"Wu Lei","year":"2020","unstructured":"Lei Wu and Zhanxing Zhu. 2020. Towards Understanding and Improving the Transferability of Adversarial Examples in Deep Neural Networks. In Asian Conference on Machine Learning."},{"key":"e_1_3_2_1_62_1","doi-asserted-by":"publisher","DOI":"10.1109\/CSF.2016.32"},{"key":"e_1_3_2_1_63_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i4.16404"},{"key":"e_1_3_2_1_64_1","doi-asserted-by":"publisher","DOI":"10.5555\/3304222.3304312"},{"key":"e_1_3_2_1_65_1","doi-asserted-by":"publisher","DOI":"10.14722\/ndss.2018.23198"},{"key":"e_1_3_2_1_66_1","volume-title":"International Conference on Learning Representations (ICLR).","author":"Xu Yilun","year":"2021","unstructured":"Yilun Xu, Yang Song, Sahaj Garg, Linyuan Gong, Rui Shu, Aditya Grover, and Stefano Ermon. 2021. Anytime Sampling for Autoregressive Models via Ordered Autoencoding. In International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_67_1","volume-title":"Wide Residual Networks. British Machine Vision Conference (BMV).","author":"Zagoruyko Sergey","year":"2016","unstructured":"Sergey Zagoruyko and Nikos Komodakis. 2016. Wide Residual Networks. British Machine Vision Conference (BMV)."},{"key":"e_1_3_2_1_68_1","volume-title":"International Conference on Machine Learning (ICML).","author":"Zhang Hongyang","year":"2019","unstructured":"Hongyang Zhang, Yaodong Yu, Jiantao Jiao, Eric\u00a0P. Xing, Laurent\u00a0El Ghaoui, and Michael\u00a0I. Jordan. 2019. Theoretically Principled Trade-off between Robustness and Accuracy. In International Conference on Machine Learning (ICML)."},{"key":"e_1_3_2_1_69_1","unstructured":"Zhihao Zheng and Pengyu Hong. 2018. Robust Detection of Adversarial Attacks by Modeling the Intrinsic Properties of Deep Neural Networks. In Advances in Neural Information Processing Systems (NeurIPS)."}],"event":{"name":"ACSAC '21: Annual Computer Security Applications Conference","location":"Virtual Event USA","acronym":"ACSAC '21"},"container-title":["Annual Computer Security Applications Conference"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3485832.3485904","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3485832.3485904","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T19:17:59Z","timestamp":1755890279000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3485832.3485904"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,12,6]]},"references-count":69,"alternative-id":["10.1145\/3485832.3485904","10.1145\/3485832"],"URL":"https:\/\/doi.org\/10.1145\/3485832.3485904","relation":{},"subject":[],"published":{"date-parts":[[2021,12,6]]},"assertion":[{"value":"2021-12-06","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}