{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,24]],"date-time":"2025-08-24T00:02:30Z","timestamp":1755993750400,"version":"3.44.0"},"publisher-location":"New York, NY, USA","reference-count":49,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,11,26]],"date-time":"2023-11-26T00:00:00Z","timestamp":1700956800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"European Union","award":["101070617"],"award-info":[{"award-number":["101070617"]}]},{"DOI":"10.13039\/501100009318","name":"Helmholtz Association","doi-asserted-by":"publisher","award":["ZT-I-OO1 4"],"award-info":[{"award-number":["ZT-I-OO1 4"]}],"id":[{"id":"10.13039\/501100009318","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100000781","name":"European Research Council","doi-asserted-by":"publisher","award":["834115"],"award-info":[{"award-number":["834115"]}],"id":[{"id":"10.13039\/501100000781","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,11,30]]},"DOI":"10.1145\/3605764.3623917","type":"proceedings-article","created":{"date-parts":[[2023,11,21]],"date-time":"2023-11-21T12:12:17Z","timestamp":1700568737000},"page":"67-78","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":2,"title":["Certifiers Make Neural Networks Vulnerable to Availability Attacks"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4369-2644","authenticated-orcid":false,"given":"Tobias","family":"Lorenz","sequence":"first","affiliation":[{"name":"CISPA Helmholtz Center for Information Security, Saarbr\u00fccken, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9022-7599","authenticated-orcid":false,"given":"Marta","family":"Kwiatkowska","sequence":"additional","affiliation":[{"name":"University of Oxford, Oxford, United Kingdom"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8949-9896","authenticated-orcid":false,"given":"Mario","family":"Fritz","sequence":"additional","affiliation":[{"name":"CISPA Helmholtz Center for Information Security, Saarbr\u00fccken, Germany"}]}],"member":"320","published-online":{"date-parts":[[2023,11,26]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Proceedings of the 35th International Conference on Machine Learning","volume":"80","author":"Athalye Anish","year":"2018","unstructured":"Anish Athalye, Nicholas Carlini, and David Wagner. 2018. Obfuscated Gradients Give a False Sense of Security: Circumventing Defenses to Adversarial Examples. In Proceedings of the 35th International Conference on Machine Learning, Vol. 80. PMLR, Stockholm, Sweden, 274--283."},{"key":"e_1_3_2_1_2_1","volume-title":"Blind Backdoors in Deep Learning Models. In 30th USENIX Security Symposium (USENIX Security 21)","author":"Bagdasaryan Eugene","year":"2021","unstructured":"Eugene Bagdasaryan and Vitaly Shmatikov. 2021. Blind Backdoors in Deep Learning Models. In 30th USENIX Security Symposium (USENIX Security 21). USENIX Association, Virtual, 1505--1521."},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33013240"},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"crossref","unstructured":"Nicholas Carlini and David Wagner. 2017. Towards evaluating the robustness of neural networks. 
In 2017 ieee symposium on security and privacy (sp). Ieee 39--57.","DOI":"10.1109\/SP.2017.49"},{"key":"e_1_3_2_1_5_1","unstructured":"Xinyun Chen Chang Liu Bo Li Kimberly Lu and Dawn Song. 2017. Targeted backdoor attacks on deep learning systems using data poisoning. arxiv: 1712.05526"},{"key":"e_1_3_2_1_6_1","volume-title":"Proceedings of the 36th International Conference on Machine Learning (Proceedings of Machine Learning Research","author":"Cohen Jeremy","year":"2019","unstructured":"Jeremy Cohen, Elan Rosenfeld, and Zico Kolter. 2019. Certified Adversarial Robustness via Randomized Smoothing. In Proceedings of the 36th International Conference on Machine Learning (Proceedings of Machine Learning Research, Vol. 97), Kamalika Chaudhuri and Ruslan Salakhutdinov (Eds.). PMLR, Long Beach, USA."},{"key":"e_1_3_2_1_7_1","unstructured":"European commission. 2020. White paper on artificial intelligence - a European approach to excellence and trust."},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2018.00058"},{"key":"e_1_3_2_1_9_1","volume-title":"Explaining and Harnessing Adversarial Examples. In International Conference on Learning Representations.","author":"Goodfellow Ian","year":"2015","unstructured":"Ian Goodfellow, Jonathon Shlens, and Christian Szegedy. 2015. Explaining and Harnessing Adversarial Examples. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_10_1","unstructured":"Tianyu Gu Brendan Dolan-Gavitt and Siddharth Garg. 2019. BadNets: Identifying Vulnerabilities in the Machine Learning Model Supply Chain. arxiv: 1708.06733"},{"key":"e_1_3_2_1_11_1","unstructured":"Sanghyun Hong Nicholas Carlini and Alexey Kurakin. 2022. Handcrafted Backdoors in Deep Neural Networks. In Advances in Neural Information Processing Systems."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-63387-9_1"},{"key":"e_1_3_2_1_13_1","volume-title":"Kochenderfer","author":"Katz Guy","year":"2017","unstructured":"Guy Katz, Clark W. Barrett, David L. Dill, Kyle Julian, and Mykel J. Kochenderfer. 2017. Reluplex: An Efficient SMT Solver for Verifying Deep Neural Networks. In Computer Aided Verification - 29th International Conference."},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1109\/5.726791"},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2019.00044"},{"key":"e_1_3_2_1_16_1","volume-title":"International Conference on Machine Learning. PMLR, 6212--6222","author":"Leino Klas","year":"2021","unstructured":"Klas Leino, Zifan Wang, and Matt Fredrikson. 2021. Globally-robust neural networks. In International Conference on Machine Learning. PMLR, 6212--6222."},{"key":"e_1_3_2_1_17_1","volume-title":"Degradation Attacks on Certifiably Robust Neural Networks. Transactions on Machine Learning Research","author":"Leino Klas","year":"2022","unstructured":"Klas Leino, Chi Zhang, Ravi Mangal, Matt Fredrikson, Bryan Parno, and Corina Pasareanu. 2022. Degradation Attacks on Certifiably Robust Neural Networks. Transactions on Machine Learning Research (2022)."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1109\/SP46215.2023.10179303"},{"volume-title":"Computer Vision - 15th European Conference.","author":"Liang Ming","key":"e_1_3_2_1_19_1","unstructured":"Ming Liang, Bin Yang, Shenlong Wang, and Raquel Urtasun. 2018. Deep Continuous Fusion for Multi-sensor 3D Object Detection. 
In Computer Vision - 15th European Conference."},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-00470-5_13"},{"key":"e_1_3_2_1_21_1","unstructured":"Tobias Lorenz Marta Kwiatkowska and Mario Fritz. 2021a. Backdoor Attacks on Network Certification via Data Poisoning. arxiv: 2108.11299v1"},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00751"},{"key":"e_1_3_2_1_23_1","volume-title":"6th International Conference on Learning Representations.","author":"Madry Aleksander","year":"2018","unstructured":"Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. 2018. Towards Deep Learning Models Resistant to Adversarial Attacks. In 6th International Conference on Learning Representations."},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746293"},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01304"},{"key":"e_1_3_2_1_26_1","unstructured":"High-Level Expert Group on Artificial Intelligence. 2019a. Ethics Guidelines for Trustworthy AI."},{"key":"e_1_3_2_1_27_1","unstructured":"High-Level Expert Group on Artificial Intelligence. 2019b. Proposal for a regulation of the European parliament and of the council laying down harmonised rules on artificial intelligence (artificial intelligence act) and amending certain union legislative acts."},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.1109\/EuroSP.2018.00035"},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-14295-6_24"},{"key":"e_1_3_2_1_30_1","unstructured":"Aditi Raghunathan Jacob Steinhardt and Percy Liang. 2018. Semidefinite relaxations for certifying robustness to adversarial examples. In Advances in Neural Information Processing Systems 31."},{"key":"e_1_3_2_1_31_1","volume-title":"Don't Trigger Me! A Triggerless Backdoor Attack Against Deep Neural Networks. arxiv","author":"Salem Ahmed","year":"2010","unstructured":"Ahmed Salem, Michael Backes, and Yang Zhang. 2020. Don't Trigger Me! A Triggerless Backdoor Attack Against Deep Neural Networks. arxiv: 2010.03282"},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.1109\/EuroSP53844.2022.00049"},{"key":"e_1_3_2_1_33_1","volume-title":"Advances in Neural Information Processing Systems","volume":"32","author":"Salman Hadi","year":"2019","unstructured":"Hadi Salman, Greg Yang, Huan Zhang, Cho-Jui Hsieh, and Pengchuan Zhang. 2019. A Convex Relaxation Barrier to Tight Robustness Verification of Neural Networks. Advances in Neural Information Processing Systems , Vol. 32 (2019)."},{"key":"e_1_3_2_1_34_1","doi-asserted-by":"publisher","DOI":"10.1109\/EuroSP51992.2021.00024"},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1145\/3290354"},{"volume-title":"The 2011 international joint conference on neural networks","author":"Stallkamp Johannes","key":"e_1_3_2_1_36_1","unstructured":"Johannes Stallkamp, Marc Schlipsing, Jan Salmen, and Christian Igel. 2011. The German traffic sign recognition benchmark: a multi-class classification competition. In The 2011 international joint conference on neural networks. 
IEEE."},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01258-8_39"},{"key":"e_1_3_2_1_38_1","volume-title":"Proceedings of the 2nd International Conference on Learning Representations, Yoshua Bengio and Yann LeCun (Eds.).","author":"Szegedy Christian","year":"2014","unstructured":"Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian J. Goodfellow, and Rob Fergus. 2014. Intriguing properties of neural networks. In Proceedings of the 2nd International Conference on Learning Representations, Yoshua Bengio and Yann LeCun (Eds.)."},{"key":"e_1_3_2_1_39_1","volume-title":"Evaluating Robustness of Neural Networks with Mixed Integer Programming. In International Conference on Learning Representations.","author":"Tjeng Vincent","year":"2019","unstructured":"Vincent Tjeng, Kai Y Xiao, and Russ Tedrake. 2019. Evaluating Robustness of Neural Networks with Mixed Integer Programming. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_40_1","unstructured":"Florian Tram\u00e8 r Nicholas Carlini Wieland Brendel and Aleksander Madry. 2020. On Adaptive Attacks to Adversarial Example Defenses. In Advances in Neural Information Processing Systems 33."},{"key":"e_1_3_2_1_41_1","volume-title":"International Conference on Learning Representations.","author":"Tsipras Dimitris","year":"2019","unstructured":"Dimitris Tsipras, Shibani Santurkar, Logan Engstrom, Alexander Turner, and Aleksander Madry. 2019. Robustness May Be at Odds with Accuracy. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_42_1","volume-title":"Label-consistent backdoor attacks. arxiv","author":"Turner Alexander","year":"1912","unstructured":"Alexander Turner, Dimitris Tsipras, and Aleksander Madry. 2019. Label-consistent backdoor attacks. arxiv: 1912.02771"},{"key":"e_1_3_2_1_43_1","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2906934"},{"key":"e_1_3_2_1_44_1","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2019.00031"},{"key":"e_1_3_2_1_45_1","volume-title":"Proceedings, Part XXIII 16","author":"Wang Ren","year":"2020","unstructured":"Ren Wang, Gaoyuan Zhang, Sijia Liu, Pin-Yu Chen, Jinjun Xiong, and Meng Wang. 2020. Practical detection of trojan neural networks: Data-limited and data-free cases. In Computer Vision--ECCV 2020: 16th European Conference, Glasgow, UK, August 23--28, 2020, Proceedings, Part XXIII 16. Springer, 222--238."},{"key":"e_1_3_2_1_46_1","volume-title":"Advances in Neural Information Processing Systems","volume":"33","author":"Xu Kaidi","year":"2020","unstructured":"Kaidi Xu, Zhouxing Shi, Huan Zhang, Yihan Wang, Kai-Wei Chang, Minlie Huang, Bhavya Kailkhura, Xue Lin, and Cho-Jui Hsieh. 2020. Automatic perturbation analysis for scalable certified robustness and beyond. Advances in Neural Information Processing Systems , Vol. 33 (2020)."},{"key":"e_1_3_2_1_47_1","volume-title":"Towards Stable and Efficient Training of Verifiably Robust Neural Networks. In International Conference on Learning Representations.","author":"Zhang Huan","year":"2020","unstructured":"Huan Zhang, Hongge Chen, Chaowei Xiao, Sven Gowal, Robert Stanforth, Bo Li, Duane Boning, and Cho-Jui Hsieh. 2020. Towards Stable and Efficient Training of Verifiably Robust Neural Networks. 
In International Conference on Learning Representations."},{"key":"e_1_3_2_1_48_1","volume-title":"Advances in Neural Information Processing Systems","volume":"31","author":"Zhang Huan","year":"2018","unstructured":"Huan Zhang, Tsui-Wei Weng, Pin-Yu Chen, Cho-Jui Hsieh, and Luca Daniel. 2018. Efficient Neural Network Robustness Certification with General Activation Functions. Advances in Neural Information Processing Systems , Vol. 31 (2018)."},{"key":"e_1_3_2_1_49_1","doi-asserted-by":"publisher","DOI":"10.1145\/3374664.3375751"}],"event":{"name":"CCS '23: ACM SIGSAC Conference on Computer and Communications Security","sponsor":["SIGSAC ACM Special Interest Group on Security, Audit, and Control"],"location":"Copenhagen Denmark","acronym":"CCS '23"},"container-title":["Proceedings of the 16th ACM Workshop on Artificial Intelligence and Security"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3605764.3623917","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3605764.3623917","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,23]],"date-time":"2025-08-23T01:36:20Z","timestamp":1755912980000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3605764.3623917"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,11,26]]},"references-count":49,"alternative-id":["10.1145\/3605764.3623917","10.1145\/3605764"],"URL":"https:\/\/doi.org\/10.1145\/3605764.3623917","relation":{},"subject":[],"published":{"date-parts":[[2023,11,26]]},"assertion":[{"value":"2023-11-26","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}