{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,11]],"date-time":"2026-02-11T14:32:20Z","timestamp":1770820340566,"version":"3.50.1"},"reference-count":107,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"}],"funder":[{"DOI":"10.13039\/501100001409","name":"Department of Science and Technology Ministry of Science and Technology","doi-asserted-by":"publisher","award":["Swarnajayanti Fellowship"],"award-info":[{"award-number":["Swarnajayanti Fellowship"]}],"id":[{"id":"10.13039\/501100001409","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Dependable and Secure Comput."],"published-print":{"date-parts":[[2020]]},"DOI":"10.1109\/tdsc.2020.3027183","type":"journal-article","created":{"date-parts":[[2020,9,29]],"date-time":"2020-09-29T22:51:46Z","timestamp":1601419906000},"page":"1-1","source":"Crossref","is-referenced-by-count":38,"title":["Image Transformation based Defense Against Adversarial Perturbation on Deep Learning Models"],"prefix":"10.1109","author":[{"given":"Akshay","family":"Agarwal","sequence":"first","affiliation":[]},{"given":"Richa","family":"Singh","sequence":"additional","affiliation":[]},{"given":"Mayank","family":"Vatsa","sequence":"additional","affiliation":[]},{"given":"Nalini K.","family":"Ratha","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","article-title":"Countering adversarial images using input transformations","author":"guo","year":"2018","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref38","article-title":"Towards deep neural network architectures robust to adversarial examples","author":"gu","year":"2014"},{"key":"ref33","article-title":"Explaining and harnessing adversarial examples","author":"goodfellow","year":"2014"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1145\/3134599"},{"key":"ref31","article-title":"Defense against the dark arts: An overview of adversarial example security research and future research directions","author":"goodfellow","year":"2018"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/BTAS.2018.8698567"},{"key":"ref37","article-title":"On the (statistical) detection of adversarial examples","author":"grosse","year":"2017"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1016\/j.imavis.2009.08.002"},{"key":"ref35","first-page":"6829","article-title":"Unravelling robustness of deep learning based face recognition against adversarial attacks","author":"goswami","year":"2018","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-019-01160-w"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/BTAS46853.2019.9185999"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2019.00341"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW50498.2020.00019"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/BF00994018"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref21","article-title":"Keeping the bad guys out: Protecting and vaccinating deep learning with JPEG compression","author":"das","year":"2017"},{"key":"ref24","article-title":"Detecting adversarial samples from artifacts","author":"feinman","year":"2017"},{"key":"ref23","article-title":"A study of the effect of JPG compression on adversarial images","author":"dziugaite","year":"2016"},{"key":"ref101","doi-asserted-by":"publisher","DOI":"10.1109\/JSTARS.2014.2375066"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.3301541"},{"key":"ref100","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.153"},{"key":"ref25","article-title":"NIST special database 32-multiple encounter dataset II","author":"founds","year":"2011"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2018.2866197"},{"key":"ref51","article-title":"MNIST handwritten digit database","author":"lecun","year":"2010"},{"key":"ref59","article-title":"No need to worry about adversarial examples in object detection in autonomous vehicles","author":"lu","year":"2017","journal-title":"Proc CVPR Spotlight Oral Workshop"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.56"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00496"},{"key":"ref56","first-page":"655","article-title":"Principled detection of out-of-distribution examples in neural networks","author":"liang","year":"2018","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/TDSC.2018.2874243"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.615"},{"key":"ref53","first-page":"7167","article-title":"A simple unified framework for detecting out-of-distribution samples and adversarial attacks","author":"lee","year":"2018","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref52","article-title":"Generative adversarial trainer: Defense to adversarial perturbations with GAN","author":"lee","year":"2017"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00526"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW50498.2020.00395"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW50498.2020.00331"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2018.2807385"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00357"},{"key":"ref8","first-page":"284","article-title":"Synthesizing robust adversarial examples","author":"athalye","year":"2018","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref49","article-title":"Adversarial machine learning at scale","author":"kurakin","year":"2016"},{"key":"ref7","first-page":"274","article-title":"Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples","author":"athalye","year":"2018","journal-title":"Proc 35th Int Conf Mach Learn"},{"key":"ref9","article-title":"Adversarial transformation networks: Learning to generate adversarial examples","author":"baluja","year":"2017"},{"key":"ref46","article-title":"Learning multiple layers of features from tiny images","author":"krizhevsky","year":"2009"},{"key":"ref45","first-page":"5283","article-title":"Provable defenses against adversarial examples via the convex outer adversarial polytope","author":"kolter","year":"2018","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref48","article-title":"Adversarial examples in the physical world","author":"kurakin","year":"2016"},{"key":"ref47","first-page":"1097","article-title":"ImageNet classification with deep convolutional neural networks","author":"krizhevsky","year":"2012","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref42","article-title":"Blocking transferability of adversarial examples in black-box learning systems","author":"hosseini","year":"2017"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2012.2205597"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1038\/s42256-019-0058-8"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.243"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.5244\/C.29.41"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2016.41"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/EuroSP.2016.36"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1023\/A:1011139631724"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00465"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00894"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.6028\/NIST.IR.7607"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1109\/INDIN.2005.1560462"},{"key":"ref78","article-title":"Certified defenses against adversarial examples","author":"raghunathan","year":"2018","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1016\/j.eng.2019.12.012"},{"key":"ref60","article-title":"Foveation-based mechanisms alleviate adversarial examples","author":"luo","year":"2016"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.14722\/ndss.2019.23415"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1038\/s42256-019-0018-3"},{"key":"ref63","article-title":"Characterizing adversarial subspaces using local intrinsic dimensionality","author":"ma","year":"2018","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1145\/3133956.3134057"},{"key":"ref65","article-title":"On detecting adversarial perturbations","author":"metzen","year":"2017","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref66","doi-asserted-by":"crossref","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.17"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.282"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/BTAS.2018.8698548"},{"key":"ref69","article-title":"Fast feature fool: A data independent approach to universal adversarial perturbations","author":"mopuri","year":"2017","journal-title":"Proc Brit Mach Vis Conf"},{"key":"ref1","article-title":"How drive.ai is mastering autonomous driving with deep learning","author":"ackerman","year":"2017","journal-title":"IEEE Spectr"},{"key":"ref95","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00871"},{"key":"ref94","article-title":"Ensemble adversarial training: Attacks and defenses","author":"tram\u00e8r","year":"2018","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref107","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00021"},{"key":"ref93","first-page":"4139","article-title":"Detecting adversarial examples through image transformation","author":"tian","year":"2018","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"ref106","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33015869"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01148"},{"key":"ref105","first-page":"684","article-title":"Defending against whitebox adversarial attacks via randomized discretization","author":"zhang","year":"2019","journal-title":"Proc Int Conf Artif Intell Statist"},{"key":"ref91","article-title":"Intriguing properties of neural networks","author":"szegedy","year":"2014","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref104","doi-asserted-by":"publisher","DOI":"10.1109\/SIPROCESS.2018.8600516"},{"key":"ref90","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"ref103","doi-asserted-by":"publisher","DOI":"10.1145\/3128572.3140449"},{"key":"ref102","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2018.2886017"},{"key":"ref98","article-title":"Spatially transformed adversarial examples","author":"xiao","year":"2018"},{"key":"ref99","article-title":"Mitigating adversarial effects through randomization","author":"xie","year":"2018","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref96","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/833"},{"key":"ref97","doi-asserted-by":"publisher","DOI":"10.1109\/TIFS.2018.2833032"},{"key":"ref10","article-title":"Dimensionality reduction as a defense against evasion attacks on machine learning classifiers","author":"bhagoji","year":"2017"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1080\/2151237X.2007.10129236"},{"key":"ref12","article-title":"Adversarial patch","author":"brown","year":"2017","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref13","article-title":"Copy and paste: A simple but effective initialization method for black-box adversarial attacks","author":"brunner","year":"2019","journal-title":"Proc CVPR Workshop Adversarial Mach Learn Real-World Comput Vis Syst"},{"key":"ref14","first-page":"132330","article-title":"Anomalous instance detection in deep learning: A survey","volume":"8","author":"bulusu","year":"2020"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/3128572.3140444"},{"key":"ref16","article-title":"Magnet and &#x201C;efficient defenses against adversarial attacks&#x201D; are not robust to adversarial examples","author":"carlini","year":"2017"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.1145\/2976749.2978392"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.49"},{"key":"ref81","article-title":"Defense-GAN: Protecting classifiers against adversarial attacks using generative models","author":"samangouei","year":"2018","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref18","first-page":"10","article-title":"EAD: Elastic-net attacks to deep neural networks via adversarial examples","author":"chen","year":"2018","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"ref84","article-title":"Very deep convolutional networks for large-scale image recognition","author":"simonyan","year":"2014"},{"key":"ref19","article-title":"Sensitivity of deep convolutional networks to Gabor noise","author":"co","year":"2019","journal-title":"Proc ICML Workshop Identifying Understanding Deep Learn Phenom"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1145\/3317611"},{"key":"ref80","first-page":"1660","article-title":"Improving the adversarial robustness and interpretability of deep neural networks by regularizing their input gradients","author":"ross","year":"2018","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.1109\/TEVC.2019.2890858"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i09.7085"},{"key":"ref86","article-title":"Certifiable distributional robustness with principled adversarial training","author":"sinha","year":"2018","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref87","article-title":"PixelDefend: Leveraging generative models to understand and defend against adversarial examples","author":"song","year":"2018","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref88","doi-asserted-by":"publisher","DOI":"10.1038\/s42256-018-0006-z"}],"container-title":["IEEE Transactions on Dependable and Secure Computing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8858\/4358699\/09207872.pdf?arnumber=9207872","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,1,12]],"date-time":"2022-01-12T16:06:33Z","timestamp":1642003593000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9207872\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020]]},"references-count":107,"URL":"https:\/\/doi.org\/10.1109\/tdsc.2020.3027183","relation":{},"ISSN":["1545-5971","1941-0018","2160-9209"],"issn-type":[{"value":"1545-5971","type":"print"},{"value":"1941-0018","type":"electronic"},{"value":"2160-9209","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020]]}}}