{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,18]],"date-time":"2025-06-18T04:25:09Z","timestamp":1750220709761,"version":"3.41.0"},"publisher-location":"New York, NY, USA","reference-count":43,"publisher":"ACM","license":[{"start":{"date-parts":[[2020,3,16]],"date-time":"2020-03-16T00:00:00Z","timestamp":1584316800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"Northrop Grumman Cybersecu- rity Research Consortium","award":["Defenses Against Adversarial Examples"],"award-info":[{"award-number":["Defenses Against Adversarial Examples"]}]},{"name":"United States National Science Foundation","award":["1640374"],"award-info":[{"award-number":["1640374"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2020,3,16]]},"DOI":"10.1145\/3374664.3375736","type":"proceedings-article","created":{"date-parts":[[2020,3,13]],"date-time":"2020-03-13T17:06:53Z","timestamp":1584119213000},"page":"85-96","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["Random Spiking and Systematic Evaluation of Defenses Against Adversarial Examples"],"prefix":"10.1145","author":[{"given":"Huangyi","family":"Ge","sequence":"first","affiliation":[{"name":"Purdue University, West Lafayette, IN, USA"}]},{"given":"Sze Yiu","family":"Chau","sequence":"additional","affiliation":[{"name":"Purdue University, West Lafayette, IN, USA"}]},{"given":"Bruno","family":"Ribeiro","sequence":"additional","affiliation":[{"name":"Purdue University, West Lafayette, IN, USA"}]},{"given":"Ninghui","family":"Li","sequence":"additional","affiliation":[{"name":"Purdue University, West Lafayette, IN, USA"}]}],"member":"320","published-online":{"date-parts":[[2020,3,16]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"ICML","volume":"80","author":"Athalye Anish","year":"2018","unstructured":"Anish Athalye, Nicholas Carlini, and David Wagner. 2018a. Obfuscated Gradients Give a False Sense of Security: Circumventing Defenses to Adversarial Examples. In ICML, Vol. 80. PMLR, Stockholmsm\u00e4ssan, Stockholm Sweden, 274--283."},{"key":"e_1_3_2_1_2_1","volume-title":"ICML","volume":"80","author":"Athalye Anish","year":"2018","unstructured":"Anish Athalye, Logan Engstrom, Andrew Ilyas, and Kevin Kwok. 2018b. Synthesizing Robust Adversarial Examples. In ICML, Vol. 80. PMLR, 284--293."},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1080\/01621459.2017.1285773"},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-7908-2604-3_16"},{"key":"e_1_3_2_1_5_1","unstructured":"Wieland Brendel Jonas Rauber and Matthias Bethge. 2018. Decision-Based Adversarial Attacks: Reliable Attacks Against Black-Box Machine Learning Models. In ICLR ."},{"key":"e_1_3_2_1_6_1","unstructured":"Xiaoyu Cao and Neil Zhenqiang Gong. 2017. Mitigating evasion attacks to deep neural networks via region-based classification. In ACSAC. ACM 278--287."},{"key":"e_1_3_2_1_7_1","volume-title":"On Evaluating Adversarial Robustness. CoRR","author":"Carlini Nicholas","year":"2019","unstructured":"Nicholas Carlini, Anish Athalye, Nicolas Papernot, Wieland Brendel, Jonas Rauber, Dimitris Tsipras, Ian J. Goodfellow, Aleksander Madry, and Alexey Kurakin. 2019. On Evaluating Adversarial Robustness. CoRR (2019). 
arxiv: 1902.06705"},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1145\/3128572.3140444"},{"volume-title":"Towards evaluating the robustness of neural networks. In 2017 IEEE S&P","author":"Carlini Nicholas","key":"e_1_3_2_1_9_1","unstructured":"Nicholas Carlini and David Wagner. 2017b. Towards evaluating the robustness of neural networks. In 2017 IEEE S&P. IEEE, 39--57."},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1109\/SPW.2018.00009"},{"key":"e_1_3_2_1_11_1","volume-title":"Wagner","author":"Carlini Nicholas","year":"2017","unstructured":"Nicholas Carlini and David A. Wagner. 2017c. MagNet and \u201cEfficient Defenses Against Adversarial Attacks\u201d are Not Robust to Adversarial Examples . CoRR , Vol. abs\/1711.08478 (2017). arxiv: 1711.08478 http:\/\/arxiv.org\/abs\/1711.08478"},{"key":"e_1_3_2_1_12_1","volume-title":"Jordan","author":"Chen Jianbo","year":"2019","unstructured":"Jianbo Chen and Michael I. Jordan. 2019. Boundary Attack"},{"volume-title":"Query-Efficient Decision-Based Adversarial Attack. (2019). arxiv","year":"1904","key":"e_1_3_2_1_13_1","unstructured":": Query-Efficient Decision-Based Adversarial Attack. (2019). arxiv: 1904.02144"},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11302"},{"key":"e_1_3_2_1_15_1","first-page":"1050","article-title":"Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning","volume":"48","author":"Gal Yarin","year":"2016","unstructured":"Yarin Gal and Zoubin Ghahramani. 2016. Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning . ICML , Vol. 48, 1050--1059.","journal-title":"ICML"},{"key":"e_1_3_2_1_16_1","first-page":"2280","article-title":"Adversarial Examples Are a Natural Consequence of Test Error in Noise","volume":"97","author":"Gilmer Justin","year":"2019","unstructured":"Justin Gilmer, Nicolas Ford, Nicholas Carlini, and Ekin Cubuk. 2019. Adversarial Examples Are a Natural Consequence of Test Error in Noise. In ICML , Vol. 97. 2280--2289.","journal-title":"ICML"},{"key":"e_1_3_2_1_17_1","unstructured":"Ian Goodfellow Jonathon Shlens and Christian Szegedy. 2015. Explaining and Harnessing Adversarial Examples. In ICLR ."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"crossref","unstructured":"James J Heckman. 1977. Sample selection bias as a specification error (with an application to the estimation of labor supply functions). (1977).","DOI":"10.3386\/w0172"},{"key":"e_1_3_2_1_19_1","volume-title":"NIPS 2014 Deep Learning and Representation Learning Workshop","author":"Hinton G.","year":"2014","unstructured":"G. Hinton, O. Vinyals, and J. Dean. 2014. Distilling the Knowledge in a Neural Network . NIPS 2014 Deep Learning and Representation Learning Workshop (2014)."},{"volume-title":"Adam: A Method for Stochastic Optimization. In International Conference on Learning Representations .","author":"Kingma D. P.","key":"e_1_3_2_1_20_1","unstructured":"D. P. Kingma and J. Ba. 2015. Adam: A Method for Stochastic Optimization. In International Conference on Learning Representations ."},{"key":"e_1_3_2_1_22_1","unstructured":"Yann LeCun Corinna Cortes and Christopher JC Burges. 1998. The MNIST database of handwritten digits. (1998). http:\/\/yann.lecun.com\/exdb\/mnist\/"},{"key":"e_1_3_2_1_23_1","unstructured":"Yanpei Liu Xinyun Chen Chang Liu and Dawn Song. 2017. Delving into Transferable Adversarial Examples and Black-box Attacks. 
In ICLR ."},{"key":"e_1_3_2_1_24_1","unstructured":"Aleksander Madry Aleksandar Makelov Ludwig Schmidt Dimitris Tsipras and Adrian Vladu. 2018. Towards deep learning models resistant to adversarial attacks. In ICLR ."},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"crossref","unstructured":"Dongyu Meng and Hao Chen. 2017. Magnet: a two-pronged defense against adversarial examples. In CCS. ACM 135--147.","DOI":"10.1145\/3133956.3134057"},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"crossref","unstructured":"Seyed-Mohsen Moosavi-Dezfooli Alhussein Fawzi and Pascal Frossard. 2016. Deepfool: a simple and accurate method to fool deep neural networks. In CVPR . 2574--2582.","DOI":"10.1109\/CVPR.2016.282"},{"volume-title":"The limitations of deep learning in adversarial settings. In 2016 EuroS&P","author":"Papernot Nicolas","key":"e_1_3_2_1_27_1","unstructured":"Nicolas Papernot, Patrick McDaniel, Somesh Jha, Matt Fredrikson, Z Berkay Celik, and Ananthram Swami. 2016a. The limitations of deep learning in adversarial settings. In 2016 EuroS&P. IEEE, 372--387."},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"crossref","unstructured":"Nicolas Papernot Patrick D. McDaniel Xi Wu Somesh Jha and Ananthram Swami. 2016b. Distillation as a Defense to Adversarial Perturbations Against Deep Neural Networks. In 2016 IEEE S&P. 582--597.","DOI":"10.1109\/SP.2016.41"},{"key":"e_1_3_2_1_29_1","volume-title":"Reiter","author":"Sharif Mahmood","year":"2018","unstructured":"Mahmood Sharif, Lujo Bauer, and Michael K. Reiter. 2018. On the Suitability of L(_mboxp )-norms for Creating and Preventing Adversarial Examples. IEEE CVPRW ."},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.1016\/S0378-3758(00)00115-4"},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.5555\/2627435.2670313"},{"key":"e_1_3_2_1_32_1","volume-title":"et almbox","author":"Sugiyama Masashi","year":"2017","unstructured":"Masashi Sugiyama, Neil D Lawrence, Anton Schwaighofer, et almbox. 2017. Dataset shift in machine learning .The MIT Press."},{"key":"e_1_3_2_1_33_1","unstructured":"Masashi Sugiyama Shinichi Nakajima Hisashi Kashima Paul V Buenau and Motoaki Kawanabe. 2008. Direct importance estimation with model selection and its application to covariate shift adaptation. In Advances in neural information processing systems. 1433--1440."},{"key":"e_1_3_2_1_34_1","volume-title":"International Conference on Learning Representations .","author":"Szegedy Christian","year":"2014","unstructured":"Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. 2014. Intriguing properties of neural networks. In International Conference on Learning Representations ."},{"key":"e_1_3_2_1_35_1","unstructured":"Dimitris Tsipras Shibani Santurkar Logan Engstrom Alexander Turner and Aleksander Madry. 2019. Robustness may be at odds with accuracy. In ICLR ."},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"crossref","unstructured":"Pascal Vincent Hugo Larochelle Yoshua Bengio and Pierre-Antoine Manzagol. 2008. Extracting and Composing Robust Features with Denoising Autoencoders. In ICML . ACM 1096--1103.","DOI":"10.1145\/1390156.1390294"},{"key":"e_1_3_2_1_37_1","first-page":"3371","article-title":"Stacked Denoising Autoencoders: Learning Useful Representations in a Deep Network with a Local Denoising Criterion","volume":"11","author":"Vincent Pascal","year":"2010","unstructured":"Pascal Vincent, Hugo Larochelle, Isabelle Lajoie, Yoshua Bengio, and Pierre-Antoine Manzagol. 2010. 
Stacked Denoising Autoencoders: Learning Useful Representations in a Deep Network with a Local Denoising Criterion. In ICML, Vol. 11. 3371--3408.","journal-title":"ICML"},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"crossref","unstructured":"Siyue Wang Xiao Wang Pu Zhao Wujie Wen David Kaeli Peter Chin and Xue Lin. 2018. Defensive Dropout for Hardening Deep Neural Networks Under Adversarial Attacks. In ICCAD . ACM Article 71 bibinfonumpages8 pages.","DOI":"10.1145\/3240765.3264699"},{"key":"e_1_3_2_1_39_1","volume-title":"Fashion-MNIST: a Novel Image Dataset for Benchmarking Machine Learning Algorithms . CoRR","author":"Xiao Han","year":"2017","unstructured":"Han Xiao, Kashif Rasul, and Roland Vollgraf. 2017. Fashion-MNIST: a Novel Image Dataset for Benchmarking Machine Learning Algorithms . CoRR , Vol. abs\/1708.07747 (2017). arxiv: 1708.07747"},{"key":"e_1_3_2_1_40_1","volume-title":"Yuille","author":"Xie Cihang","year":"2018","unstructured":"Cihang Xie, Jianyu Wang, Zhishuai Zhang, Zhou Ren, and Alan L. Yuille. 2018. Mitigating adversarial effects through randomization. ICLR (2018)."},{"key":"e_1_3_2_1_41_1","unstructured":"Cihang Xie Yuxin Wu Laurens van der Maaten Alan L. Yuille and Kaiming He. 2019. Feature Denoising for Improving Adversarial Robustness. In CVPR ."},{"key":"e_1_3_2_1_42_1","volume-title":"Feature Squeezing: Detecting Adversarial Examples in Deep Neural Networks. NDSS .","author":"Xu Weilin","year":"2018","unstructured":"Weilin Xu, David Evans, and Yanjun Qi. 2018. Feature Squeezing: Detecting Adversarial Examples in Deep Neural Networks. NDSS ."},{"key":"e_1_3_2_1_43_1","doi-asserted-by":"crossref","unstructured":"Sergey Zagoruyko and Nikos Komodakis. 2016. Wide Residual Networks. In BMVC .","DOI":"10.5244\/C.30.87"},{"key":"e_1_3_2_1_44_1","volume-title":"Laurent El Ghaoui, and Michael I. Jordan","author":"Zhang Hongyang","year":"2019","unstructured":"Hongyang Zhang, Yaodong Yu, Jiantao Jiao, Eric P. Xing, Laurent El Ghaoui, and Michael I. Jordan. 2019. Theoretically Principled Trade-off between Robustness and Accuracy. In ICML . 7472--7482."}],"event":{"name":"CODASPY '20: Tenth ACM Conference on Data and Application Security and Privacy","sponsor":["SIGSAC ACM Special Interest Group on Security, Audit, and Control"],"location":"New Orleans LA USA","acronym":"CODASPY '20"},"container-title":["Proceedings of the Tenth ACM Conference on Data and Application Security and Privacy"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3374664.3375736","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3374664.3375736","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T22:33:08Z","timestamp":1750199588000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3374664.3375736"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,3,16]]},"references-count":43,"alternative-id":["10.1145\/3374664.3375736","10.1145\/3374664"],"URL":"https:\/\/doi.org\/10.1145\/3374664.3375736","relation":{},"subject":[],"published":{"date-parts":[[2020,3,16]]},"assertion":[{"value":"2020-03-16","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
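The record above is a Crossref REST API "work" message, as served by Crossref's documented GET https://api.crossref.org/works/{DOI} endpoint. The following is a minimal Python sketch of how such a record can be retrieved and the bibliographic fields shown above extracted; it assumes network access to api.crossref.org, needs no authentication, and uses only field names that appear in the record itself. It is an illustrative sketch, not part of the deposited metadata.

import json
import urllib.request

DOI = "10.1145/3374664.3375736"
URL = f"https://api.crossref.org/works/{DOI}"

# Fetch the work record; Crossref wraps the metadata in
# {"status": ..., "message-type": "work", "message": {...}}.
with urllib.request.urlopen(URL) as resp:
    record = json.load(resp)

assert record["status"] == "ok" and record["message-type"] == "work"
msg = record["message"]

# Pull the same fields that are visible in the record above.
title = msg["title"][0]
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in msg["author"])
print(f"{authors}. {title}.")
print(f"DOI: {msg['DOI']}, pages {msg['page']}, issued {msg['issued']['date-parts'][0]}")
print(f"deposited references: {len(msg.get('reference', []))}")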