{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,24]],"date-time":"2026-02-24T17:22:31Z","timestamp":1771953751878,"version":"3.50.1"},"reference-count":73,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"8","license":[{"start":{"date-parts":[[2022,8,1]],"date-time":"2022-08-01T00:00:00Z","timestamp":1659312000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,8,1]],"date-time":"2022-08-01T00:00:00Z","timestamp":1659312000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,8,1]],"date-time":"2022-08-01T00:00:00Z","timestamp":1659312000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Visvesvaraya Ph.D. Fellowship"},{"name":"MeitY, India"},{"name":"MeitY, India"},{"name":"Swarnajayanti Fellowship by the Government of India"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2022,8]]},"DOI":"10.1109\/tnnls.2021.3051529","type":"journal-article","created":{"date-parts":[[2021,3,12]],"date-time":"2021-03-12T20:34:50Z","timestamp":1615581290000},"page":"3277-3289","source":"Crossref","is-referenced-by-count":14,"title":["DAMAD: Database, Attack, and Model Agnostic Adversarial Perturbation Detector"],"prefix":"10.1109","volume":"33","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-7362-4752","authenticated-orcid":false,"given":"Akshay","family":"Agarwal","sequence":"first","affiliation":[{"name":"Department of Computer Science Engineering, IIIT Delhi, New Delhi, India"}]},{"given":"Gaurav","family":"Goswami","sequence":"additional","affiliation":[{"name":"IBM Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5952-2274","authenticated-orcid":false,"given":"Mayank","family":"Vatsa","sequence":"additional","affiliation":[{"name":"Department of Computer Science Engineering, IIT Jodhpur, Jheepasani, India"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4060-4573","authenticated-orcid":false,"given":"Richa","family":"Singh","sequence":"additional","affiliation":[{"name":"Department of Computer Science Engineering, IIT Jodhpur, Jheepasani, India"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7913-5722","authenticated-orcid":false,"given":"Nalini K.","family":"Ratha","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Engineering, SUNY-Buffalo, Buffalo, NY, USA"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/BTAS.2016.7791171"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/BTAS.2018.8698548"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW50498.2020.00331"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW50498.2020.00395"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2018.2807385"},{"key":"ref6","first-page":"1","article-title":"Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples","volume-title":"Proc. ICML","author":"Athalye"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/BTAS.2013.6712704"},{"key":"ref8","article-title":"Enhancing robustness of machine learning systems via data transformations","volume-title":"arXiv:1704.02654","author":"Nitin Bhagoji","year":"2017"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3128572.3140444"},{"key":"ref10","article-title":"MagNet and \u2018Efficient defenses against adversarial Attacks\u2019 are not robust to adversarial examples","volume-title":"arXiv:1711.08478","author":"Carlini","year":"2017"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.49"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1016\/j.cose.2019.04.014"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/j.cose.2020.101916"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/j.ins.2020.04.019"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11302"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/BF00994018"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref18","article-title":"Detecting adversarial samples from artifacts","volume-title":"arXiv:1703.00410","author":"Feinman","year":"2017"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.6028\/nist.ir.7807"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2019.00341"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/BTAS46853.2019.9185999"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW50498.2020.00019"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/BTAS.2018.8698567"},{"key":"ref24","article-title":"Adversarial and clean data are not twins","volume-title":"arXiv:1704.04960","author":"Gong","year":"2017"},{"key":"ref25","first-page":"1","article-title":"Explaining and harnessing adversarial examples","volume-title":"Proc. ICLR","author":"Goodfellow"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-019-01160-w"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.12341"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1016\/j.imavis.2009.08.002"},{"key":"ref29","article-title":"On the (Statistical) detection of adversarial examples","volume-title":"arXiv:1702.06280","author":"Grosse","year":"2017"},{"key":"ref30","first-page":"1","article-title":"Countering adversarial images using input transformations","volume-title":"Proc. ICLR","author":"Guo"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TSMC.1973.4309314"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref33","first-page":"1","article-title":"A baseline for detecting misclassified and out-of-distribution examples in neural networks","volume-title":"Proc. ICLR","author":"Hendrycks"},{"key":"ref34","first-page":"1","article-title":"Early methods for detecting adversarial images","volume-title":"Proc. ICLR (Workshop Track)","author":"Hendrycks"},{"key":"ref35","first-page":"1","article-title":"Distilling the knowledge in a neural network","volume-title":"Proc. NIPS Deep Learn. Workshop","author":"Hinton"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.243"},{"key":"ref37","first-page":"32","article-title":"Learning multiple layers of features from tiny images","author":"Krizhevsky","year":"2009"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1201\/9781351251389-8"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/5.726791"},{"key":"ref40","first-page":"7167","article-title":"A simple unified framework for detecting out-of-distribution samples and adversarial attacks","volume-title":"Proc. NIPS","author":"Lee"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.615"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/TDSC.2018.2874243"},{"key":"ref43","first-page":"1","article-title":"Principled detection of out-of-distribution examples in neural networks","volume-title":"Proc. ICLR","author":"Liang"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00496"},{"key":"ref45","first-page":"1","article-title":"Characterizing adversarial subspaces using local intrinsic dimensionality","volume-title":"Proc. ICLR","author":"Ma"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.06083"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1145\/3133956.3134057"},{"key":"ref48","first-page":"1","article-title":"On detecting adversarial perturbations","volume-title":"Proc. ICLR","author":"Metzen"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.17"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.282"},{"key":"ref51","article-title":"Adversarial robustness toolbox v1.0.0","volume-title":"arXiv:1807.01069","author":"Nicolae","year":"2018"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2002.1017623"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2016.41"},{"key":"ref54","first-page":"1","article-title":"Certified defenses against adversarial examples","volume-title":"Proc. ICLR","author":"Raghunathan"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1016\/S0167-8655(03)00079-5"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/K18-2019"},{"key":"ref57","first-page":"1","article-title":"Defense-GAN: Protecting classifiers against adversarial attacks using generative models","volume-title":"Proc. ICLR","author":"Samangouei"},{"key":"ref58","article-title":"Very deep convolutional networks for large-scale image recognition","volume-title":"arXiv:1409.1556","author":"Simonyan","year":"2014"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2018.12.003"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i09.7085"},{"key":"ref61","first-page":"1","article-title":"Certifiable distributional robustness with principled adversarial training","volume-title":"Proc. ICLR","author":"Sinha"},{"key":"ref62","first-page":"1","article-title":"Pixeldefend: Leveraging generative models to understand and defend against adversarial examples","volume-title":"Proc. ICLR","author":"Song"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/TEVC.2019.2890858"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"ref65","first-page":"1","article-title":"Intriguing properties of neural networks","volume-title":"Proc. ICLR","author":"Szegedy"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/TETCI.2019.2917704"},{"key":"ref67","first-page":"1","article-title":"Ensemble adversarial training: Attacks and defenses","volume-title":"Proc. ICLR","author":"Tram\u00e8r"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1016\/0169-7439(87)80084-9"},{"key":"ref69","article-title":"Fashion-MNIST: A novel image dataset for benchmarking machine learning algorithms","volume-title":"arXiv:1708.07747","author":"Xiao","year":"2017"},{"key":"ref70","first-page":"1","article-title":"Mitigating adversarial effects through randomization","volume-title":"Proc. ICLR","author":"Xie"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.14722\/ndss.2018.23198"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2018.2886017"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/TDSC.2020.3027183"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5962385\/9849214\/09377649.pdf?arnumber=9377649","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,9]],"date-time":"2024-01-09T23:33:11Z","timestamp":1704843191000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9377649\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,8]]},"references-count":73,"journal-issue":{"issue":"8"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2021.3051529","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,8]]}}}