{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,7]],"date-time":"2026-04-07T08:10:50Z","timestamp":1775549450400,"version":"3.50.1"},"reference-count":72,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2025,4]]},"DOI":"10.1109\/tnnls.2024.3386642","type":"journal-article","created":{"date-parts":[[2024,4,22]],"date-time":"2024-04-22T17:40:16Z","timestamp":1713807616000},"page":"6679-6692","source":"Crossref","is-referenced-by-count":8,"title":["On the Robustness of Bayesian Neural Networks to Adversarial Attacks"],"prefix":"10.1109","volume":"36","author":[{"given":"Luca","family":"Bortolussi","sequence":"first","affiliation":[{"name":"Department of Mathematics, Informatics and Geosciences, University of Trieste, Trieste, Italy"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7532-6249","authenticated-orcid":false,"given":"Ginevra","family":"Carbone","sequence":"additional","affiliation":[{"name":"Department of Mathematics and Geosciences, University of Trieste, Trieste, Italy"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1190-6097","authenticated-orcid":false,"given":"Luca","family":"Laurenti","sequence":"additional","affiliation":[{"name":"Delft Center for Systems and Control, TU Delft University, Delft, The Netherlands"}]},{"given":"Andrea","family":"Patane","sequence":"additional","affiliation":[{"name":"School of Computer Science and Statistics, Trinity College, Dublin, Ireland"}]},{"given":"Guido","family":"Sanguinetti","sequence":"additional","affiliation":[{"name":"SISSA, International School for Advanced Studies, Trieste, Italy"}]},{"given":"Matthew","family":"Wicker","sequence":"additional","affiliation":[{"name":"Department of Computer Science, University of Oxford, Oxford, U.K."}]}],"member":"263","reference":[{"key":"ref1","article-title":"Intriguing properties of neural networks","author":"Szegedy","year":"2013","journal-title":"arXiv:1312.6199"},{"key":"ref2","article-title":"Explaining and harnessing adversarial examples","author":"Goodfellow","year":"2014","journal-title":"arXiv:1412.6572"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2019.2933524"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/3243734.3264418"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3251999"},{"key":"ref6","article-title":"Towards deep learning models resistant to adversarial attacks","author":"Madry","year":"2017","journal-title":"arXiv:1706.06083"},{"key":"ref7","volume":"118","author":"Neal","year":"2012","journal-title":"Bayesian Learning for Neural Networks"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3265533"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2020.3017292"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2015.2499302"},{"key":"ref11","article-title":"Detecting adversarial samples from artifacts","author":"Feinman","year":"2017","journal-title":"arXiv:1703.00410"},{"key":"ref12","first-page":"2431","article-title":"Bayesian inference with certifiable adversarial robustness","volume-title":"Proc. Int. Conf. Artif. Intell. Stat.","author":"Wicker"},{"key":"ref13","article-title":"Bayesian adversarial spheres: Bayesian inference and adversarial examples in a noiseless setting","author":"Bekasov","year":"2018","journal-title":"arXiv:1811.12335"},{"key":"ref14","article-title":"Adv-BNN: Improved adversarial defense through robust Bayesian neural network","author":"Liu","year":"2018","journal-title":"arXiv:1810.01279"},{"key":"ref15","article-title":"Gradient-free adversarial attacks for Bayesian neural networks","author":"Yuan","year":"2020","journal-title":"arXiv:2012.12640"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/978-0-387-21752-9_1"},{"key":"ref17","first-page":"1613","article-title":"Weight uncertainty in neural networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Blundell"},{"key":"ref18","first-page":"15602","article-title":"Robustness of Bayesian neural networks to gradient-based attacks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Carbone"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3111892"},{"key":"ref20","article-title":"Adversarial examples in modern machine learning: A review","author":"Reza Wiyatno","year":"2019","journal-title":"arXiv:1911.05268"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2018.2886017"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2020.3025954"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1145\/3370748.3406585"},{"key":"ref24","first-page":"1","article-title":"Adversarial examples are not bugs, they are features","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Ilyas"},{"key":"ref25","article-title":"Defensive quantization: When efficiency meets robustness","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Lin"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3299408"},{"key":"ref27","article-title":"Robustness may be at odds with accuracy","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Tsipras"},{"key":"ref28","article-title":"Evaluating the robustness of Bayesian neural networks against different types of attacks","author":"Pang","year":"2021","journal-title":"arXiv:2106.09223"},{"key":"ref29","article-title":"Understanding measures of uncertainty for adversarial example detection","author":"Smith","year":"2018","journal-title":"arXiv:1803.08533"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/AIKE52691.2021.00017"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA40945.2020.9196844"},{"key":"ref32","article-title":"Sufficient conditions for idealised models to have no adversarial examples: A theoretical and empirical study with Bayesian neural networks","author":"Gal","year":"2018","journal-title":"arXiv:1806.00667"},{"key":"ref33","article-title":"Adversarial phenomenon in the eyes of Bayesian deep learning","author":"Rawat","year":"2017","journal-title":"arXiv:1711.08244"},{"key":"ref34","first-page":"3","article-title":"Adversarial examples are not easily detected: Bypassing ten detection methods","volume-title":"Proc. 10th ACM Workshop Artif. Intell. Secur.","author":"Carlini"},{"key":"ref35","article-title":"The limitations of model uncertainty in adversarial settings","author":"Grosse","year":"2018","journal-title":"arXiv:1812.02606"},{"key":"ref36","first-page":"1198","article-title":"Probabilistic safety for Bayesian neural networks","volume-title":"Proc. Conf. Uncertainty Artif. Intell.","author":"Wicker"},{"key":"ref37","article-title":"Make sure you\u2019re unsure: A framework for verifying probabilistic specifications","author":"Berrada","year":"2021","journal-title":"arXiv:2102.09479"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00381"},{"key":"ref39","first-page":"6892","article-title":"Bayesian adversarial learning","volume-title":"Proc. 32nd Int. Conf. Neural Inf. Process. Syst.","author":"Ye"},{"issue":"4","key":"ref40","doi-asserted-by":"crossref","first-page":"303","DOI":"10.1007\/BF02551274","article-title":"Approximation by superpositions of a sigmoidal function","volume":"2","author":"Cybenko","year":"1989","journal-title":"Math. Control, Signals, Syst."},{"issue":"2","key":"ref41","doi-asserted-by":"crossref","first-page":"251","DOI":"10.1016\/0893-6080(91)90009-T","article-title":"Approximation capabilities of multilayer feedforward networks","volume":"4","author":"Hornik","year":"1991","journal-title":"Neural Netw."},{"key":"ref42","article-title":"Gaussian process behaviour in wide deep neural networks","author":"Matthews","year":"2018","journal-title":"arXiv:1804.11271"},{"key":"ref43","first-page":"1","article-title":"A function space view of bounded norm infinite width ReLU nets: The multivariate case","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Ongie"},{"key":"ref44","article-title":"Neural networks as interacting particle systems: Asymptotic convexity of the loss landscape and universal scaling of the approximation error","author":"Rotskoff","year":"2018","journal-title":"arXiv:1805.00915"},{"key":"ref45","doi-asserted-by":"crossref","DOI":"10.1017\/CBO9780511804779","volume-title":"Bayesian Reasoning and Machine Learning","author":"Barber","year":"2012"},{"key":"ref46","volume-title":"Pattern Recognition and Machine Learning","author":"Bishop","year":"2006"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1201\/b10905-6"},{"key":"ref48","doi-asserted-by":"crossref","DOI":"10.1137\/1.9780898718980","volume-title":"The Geometry of Random Fields","author":"Adler","year":"2010"},{"key":"ref49","volume-title":"Convergence of Probability Measures","author":"Billingsley","year":"2013"},{"key":"ref50","article-title":"Deep neural networks as Gaussian processes","author":"Lee","year":"2017","journal-title":"arXiv:1711.00165"},{"key":"ref51","article-title":"Deep convolutional networks as shallow Gaussian processes","author":"Garriga-Alonso","year":"2018","journal-title":"arXiv:1808.05587"},{"key":"ref52","first-page":"1178","article-title":"Adversarial vulnerability for any classifier","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Fawzi"},{"key":"ref53","article-title":"On the geometry of adversarial examples","author":"Khoury","year":"2018","journal-title":"arXiv:1811.00525"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1103\/physrevx.10.041044"},{"key":"ref55","first-page":"314","article-title":"Fairwashing explanations with off-manifold detergent","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Anders"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2019.2957109"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.7551\/mitpress\/3206.001.0001"},{"key":"ref58","article-title":"Exact posterior distributions of wide Bayesian neural networks","author":"Hron","year":"2020","journal-title":"arXiv:2006.10541"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1063\/1.3034123"},{"key":"ref60","first-page":"1","article-title":"Bayesian deep convolutional networks with many channels are Gaussian processes","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Novak"},{"issue":"74","key":"ref61","first-page":"1","article-title":"All you need is a good functional prior for Bayesian deep learning","volume":"23","author":"Tran","year":"2022","journal-title":"J. Mach. Learn. Res."},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33017759"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/ICPR48806.2021.9413290"},{"key":"ref64","first-page":"1","article-title":"Adversarial robustness guarantees for Gaussian processes","volume":"23","author":"Patane","year":"2022","journal-title":"J. Mach. Learn. Res."},{"key":"ref65","article-title":"Fashion-MNIST: A novel image dataset for benchmarking machine learning algorithms","author":"Xiao","year":"2017","journal-title":"arXiv:1708.07747"},{"key":"ref66","article-title":"Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples","author":"Athalye","year":"2018","journal-title":"arXiv:1802.00420"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.49"},{"key":"ref68","first-page":"631","article-title":"Is robustness the cost of accuracy?\u2014A comprehensive study on the robustness of 18 deep image classification models","volume-title":"Proc. Eur. Conf. Comput. Vis. (ECCV)","author":"Su"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/789"},{"key":"ref70","first-page":"1","article-title":"A simple baseline for Bayesian uncertainty in deep learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Maddox"},{"key":"ref71","first-page":"5852","article-title":"Noisy natural gradient as variational inference","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Zhang"},{"key":"ref72","first-page":"15","article-title":"ZOO: Zeroth order optimization based black-box attacks to deep neural networks without training substitute models","volume-title":"Proc. 10th ACM Workshop Artif. Intell. Secur.","author":"Chen"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5962385\/10949581\/10506195.pdf?arnumber=10506195","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,4,5]],"date-time":"2025-04-05T05:19:16Z","timestamp":1743830356000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10506195\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,4]]},"references-count":72,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2024.3386642","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,4]]}}}