{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,5]],"date-time":"2026-03-05T15:33:09Z","timestamp":1772724789146,"version":"3.50.1"},"reference-count":62,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,5,1]],"date-time":"2023-05-01T00:00:00Z","timestamp":1682899200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-009"},{"start":{"date-parts":[[2023,5,1]],"date-time":"2023-05-01T00:00:00Z","timestamp":1682899200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-001"}],"funder":[{"DOI":"10.13039\/100000879","name":"Alfred P. Sloan Foundation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100000879","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100000781","name":"European Research Council","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100000781","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000001","name":"National Science Foundation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,5]]},"DOI":"10.1109\/sp46215.2023.10179451","type":"proceedings-article","created":{"date-parts":[[2023,7,21]],"date-time":"2023-07-21T17:18:15Z","timestamp":1689959895000},"page":"1311-1328","source":"Crossref","is-referenced-by-count":62,"title":["RAB: Provable Robustness Against Backdoor Attacks"],"prefix":"10.1109","author":[{"given":"Maurice","family":"Weber","sequence":"first","affiliation":[{"name":"ETH Zurich,Switzerland"}]},{"given":"Xiaojun","family":"Xu","sequence":"additional","affiliation":[{"name":"University of Illinois at Urbana-Champaign,USA"}]},{"given":"Bojan","family":"Karla\u0161","sequence":"additional","affiliation":[{"name":"ETH Zurich,Switzerland"}]},{"given":"Ce","family":"Zhang","sequence":"additional","affiliation":[{"name":"ETH Zurich,Switzerland"}]},{"given":"Bo","family":"Li","sequence":"additional","affiliation":[{"name":"University of Illinois at Urbana-Champaign,USA"}]}],"member":"263","reference":[{"key":"ref1","first-page":"274","article-title":"Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples","volume-title":"International conference on machine learning","author":"Athalye"},{"key":"ref2","first-page":"1467","article-title":"Poisoning attacks against support vector machines","volume-title":"Proceedings of the 29th International Coference on Machine Learning","author":"Biggio"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414862"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/3134600.3134606"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/3128572.3140444"},{"key":"ref6","article-title":"Detecting backdoor attacks on deep neural networks by activation clustering","author":"Chen","year":"2019","journal-title":"SafeAI@ AAAI"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/647"},{"key":"ref8","article-title":"Targeted back-door attacks on deep learning systems using data poisoning","author":"Chen","year":"2017"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/SPW50608.2020.00025"},{"key":"ref10","first-page":"1310","article-title":"Certified adversarial robustness via randomized smoothing","volume-title":"Proceedings of the 36th 
International Conference on Machine Learning","volume":"97","author":"Cohen"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref12","article-title":"UCI machine learning repository","author":"Dua","year":"2017"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1145\/3359789.3359790"},{"key":"ref14","article-title":"Explaining and harnessing adversarial examples","volume-title":"International Conference on Learning Representations","author":"Goodfellow"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2909068"},{"key":"ref16","article-title":"Badnets: Identifying vulnerabilities in the machine learning model supply chain","author":"Gu","year":"2017"},{"key":"ref17","first-page":"4129","article-title":"Spectre: defending against backdoor attacks using robust statistics","volume-title":"Proceedings of the 38th International Conference on Machine Learning","volume":"139","author":"Hayase"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref19","article-title":"Imagenette","author":"Howard"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i9.16971"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i9.21191"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.14778\/3430915.3430917"},{"key":"ref23","article-title":"Learning multiple layers of features from tiny images","author":"Krizhevsky","year":"2009","journal-title":"Tech. Rep."},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/5.726791"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2019.00044"},{"key":"ref26","article-title":"Deep partition aggregation: Provable defenses against general poisoning attacks","volume-title":"9th International Conference on Learning Representations","author":"Levine"},{"key":"ref27","first-page":"1885","article-title":"Data poisoning attacks on factorization-based collaborative filtering","author":"Li","year":"2016","journal-title":"Advances in neural information processing systems"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/sp46215.2023.10179303"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1145\/3460120.3485258"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01234-2_23"},{"key":"ref31","article-title":"Characterizing adversarial subspaces using local intrinsic dimensionality","volume-title":"International Conference on Learning Representations","author":"Ma"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/657"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.06083"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1145\/3128572.3140451"},{"key":"ref35","article-title":"On the problem of the most efficient tests of statistical hypotheses. 231","volume":"289","author":"Neyman","year":"1933","journal-title":"Phil. Trans. Roy. Statistical Soc. A"},{"key":"ref36","article-title":"Adversarial robustness toolbox v1. 
0.0","author":"Nicolae","year":"2018"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1201.0490"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP42928.2021.9506313"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1093\/bioinformatics\/btab727"},{"key":"ref40","first-page":"8230","article-title":"Certified robustness to label-flipping attacks via randomized smoothing","volume-title":"International Conference on Machine Learning","author":"Rosenfeld"},{"key":"ref41","first-page":"11 957","article-title":"Hidden trigger back-door attacks","volume-title":"Proceedings of the AAAI Conference on Artificial Intelligence","volume":"34","author":"Saha"},{"key":"ref42","first-page":"9389","article-title":"Just how toxic is data poisoning? a unified benchmark for backdoor and data poisoning attacks","volume-title":"Proceedings of the 38th International Conference on Machine Learning","volume":"139","author":"Schwarzschild"},{"key":"ref43","first-page":"9389","article-title":"Just how toxic is data poisoning? a unified benchmark for backdoor and data poisoning attacks","volume-title":"International Conference on Machine Learning","author":"Schwarzschild"},{"key":"ref44","first-page":"6106","article-title":"Poison frogs! targeted clean-label poisoning attacks on neural networks","volume-title":"Proceedings of the 32nd International Conference on Neural Information Processing Systems","author":"Shafahi"},{"key":"ref45","first-page":"3520","article-title":"Certified defenses for data poisoning attacks","volume-title":"Proceedings of the 31st International Conference on Neural Information Processing Systems","author":"Steinhardt"},{"key":"ref46","article-title":"Poisoned classifiers are not only backdoored, they are fundamentally broken","author":"Sun","year":"2020"},{"key":"ref47","first-page":"1541","article-title":"Demon in the variant: Statistical analysis of {DNNs} for robust backdoor contamination detection","volume-title":"30th USENIX Security Symposium","author":"Tang"},{"key":"ref48","first-page":"8000","article-title":"Spectral signatures in backdoor attacks","author":"Tran","year":"2018","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref49","article-title":"On certifying robustness against backdoor attacks via randomized smoothing","author":"Wang","year":"2020","journal-title":"CVPR 2020 Workshop on Adversarial Machine Learning in Computer Vision"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2019.00031"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/GlobalSIP.2018.8646578"},{"key":"ref52","article-title":"Sha-2 \u2014 Wikipedia, the free encyclopedia","author":"contributors","year":"2020"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/543"},{"key":"ref54","first-page":"11 372","article-title":"Crfl: Certifiably robust federated learning against backdoor attacks","volume-title":"International Conference on Machine Learning","author":"Xie"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.14722\/ndss.2018.23198"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/SP40001.2021.00034"},{"key":"ref57","article-title":"Generative poisoning attack method against neural networks","author":"Yang","year":"2017"},{"key":"ref58","first-page":"10 693","article-title":"Randomized smoothing of all shapes and sizes","volume-title":"International Conference on Machine Learning","author":"Yang"},{"key":"ref59","article-title":"Characterizing audio adversarial 
examples using temporal dependency","volume-title":"International Conference on Learning Representations","author":"Yang"},{"key":"ref60","article-title":"End-to-end robustness for sensing-reasoning machine learning pipelines","author":"Yang","year":"2020"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1145\/3374664.3375751"},{"key":"ref62","first-page":"7614","article-title":"Transferable clean-label poisoning attacks on deep neural nets","volume-title":"Proceedings of the 36th International Conference on Machine Learning","volume":"97","author":"Zhu"}],"event":{"name":"2023 IEEE Symposium on Security and Privacy (SP)","location":"San Francisco, CA, USA","start":{"date-parts":[[2023,5,21]]},"end":{"date-parts":[[2023,5,25]]}},"container-title":["2023 IEEE Symposium on Security and Privacy (SP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10179215\/10179280\/10179451.pdf?arnumber=10179451","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,7,20]],"date-time":"2024-07-20T05:16:29Z","timestamp":1721452589000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10179451\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,5]]},"references-count":62,"URL":"https:\/\/doi.org\/10.1109\/sp46215.2023.10179451","relation":{},"subject":[],"published":{"date-parts":[[2023,5]]}}}
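
The record above is the JSON returned by Crossref's public REST API for this DOI. As a minimal sketch of how such a record might be fetched and read programmatically, the Python snippet below queries the works endpoint (https://api.crossref.org/works/{DOI}) and extracts a few fields; it assumes the third-party requests package is installed, and the field paths are taken directly from the record shown above.

    # Minimal sketch: fetch this Crossref work record and print a citation line.
    # Assumes the public Crossref REST API and the `requests` package;
    # error handling is kept to a bare minimum.
    import requests

    DOI = "10.1109/sp46215.2023.10179451"  # RAB, IEEE S&P 2023

    resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=10)
    resp.raise_for_status()
    msg = resp.json()["message"]  # the "message" object shown above

    title = msg["title"][0]
    authors = ", ".join(f'{a["given"]} {a["family"]}' for a in msg["author"])
    year = msg["issued"]["date-parts"][0][0]  # date-parts is [[year, month, ...]]
    venue = msg["container-title"][0]

    print(f'{authors}. "{title}". {venue}, {year}, pp. {msg["page"]}.')
    print(f'Cited by {msg["is-referenced-by-count"]}; '
          f'{msg["references-count"]} references deposited.')

Run as-is, this would print a one-line citation for the paper plus its citation and reference counts; the same field paths apply to any Crossref work record of type "proceedings-article".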