{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,3]],"date-time":"2026-03-03T07:50:25Z","timestamp":1772524225721,"version":"3.50.1"},"reference-count":111,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2018,1,1]],"date-time":"2018-01-01T00:00:00Z","timestamp":1514764800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/OAPA.html"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61702539"],"award-info":[{"award-number":["61702539"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61728201"],"award-info":[{"award-number":["61728201"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2018]]},"DOI":"10.1109\/access.2018.2805680","type":"journal-article","created":{"date-parts":[[2018,2,13]],"date-time":"2018-02-13T19:22:47Z","timestamp":1518549767000},"page":"12103-12117","source":"Crossref","is-referenced-by-count":341,"title":["A Survey on Security Threats and Defensive Techniques of Machine Learning: A Data Driven 
View"],"prefix":"10.1109","volume":"6","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-2922-3518","authenticated-orcid":false,"given":"Qiang","family":"Liu","sequence":"first","affiliation":[]},{"given":"Pan","family":"Li","sequence":"additional","affiliation":[]},{"given":"Wentao","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Wei","family":"Cai","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4485-6743","authenticated-orcid":false,"given":"Shui","family":"Yu","sequence":"additional","affiliation":[]},{"given":"Victor C. M.","family":"Leung","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1142\/S0218001414600027"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-34166-3_46"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/EuroSP.2016.36"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.282"},{"key":"ref31","first-page":"427","article-title":"Deep neural networks are easily fooled: High confidence predictions for unrecognizable images","author":"nguyen","year":"2015","journal-title":"Proc IEEE Conf Comput Vis Pattern Recognit (CVPR)"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2017.172"},{"key":"ref37","first-page":"1452","article-title":"Data poisoning attacks against autoregressive models","author":"alfeld","year":"2016","journal-title":"Proc AAAI Conf Artif Intell (AAAI)"},{"key":"ref36","first-page":"1885","article-title":"Data poisoning attacks on factorization-based collaborative filtering","author":"li","year":"2016","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref35","author":"kurakin","year":"2017","journal-title":"Adversarial examples in the physical world"},{"key":"ref34","author":"liu","year":"2017","journal-title":"Delving into transferable adversarial examples and black-box 
attacks"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1145\/2517312.2517321"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-34620-0_21"},{"key":"ref29","author":"szegedy","year":"2014","journal-title":"Intriguing properties of neural networks"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1145\/2810103.2813677"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1016\/j.ins.2013.03.022"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1145\/2909827.2930784"},{"key":"ref24","first-page":"1","article-title":"Detection of malicious PDF files based on hierarchical document structure","author":"\u0161rndi\u0107","year":"2013","journal-title":"Proc 20th Annu Netw Distrib Syst Security Symp"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2014.2320725"},{"key":"ref101","first-page":"1054","article-title":"RAPPOR: Randomized aggregatable privacy-preserving ordinal response","author":"pihur","year":"2014","journal-title":"Proc ACM SIGSAC Conf Comput Commun Secur"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1145\/2666652.2666666"},{"key":"ref100","doi-asserted-by":"publisher","DOI":"10.1007\/11787006_1"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-662-44415-3_5"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2017\/551"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2014.08.081"},{"key":"ref59","author":"masha","year":"2016","journal-title":"Adversarial Attacks on Image Recognition"},{"key":"ref58","author":"grosse","year":"2016","journal-title":"Adversarial perturbations against deep neural networks for malware classification"},{"key":"ref57","first-page":"2087","article-title":"Feature cross-substitution in adversarial classification","author":"li","year":"2014","journal-title":"Proc 27th Int Conf Neural Inf Process Syst 
(NIPS)"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2015.2415032"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-40994-3_25"},{"key":"ref54","author":"rosenberg","year":"2017","journal-title":"Generic black-box end-to-end attack against state of the art API call based malware classifiers"},{"key":"ref53","author":"papernot","year":"2016","journal-title":"Transferability in machine learning from phenomena to black-box attacks using adversarial samples"},{"key":"ref52","author":"tram\u00e8r","year":"2017","journal-title":"The space of transferable adversarial examples"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1016\/j.patrec.2012.11.006"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2014.2325029"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.bdr.2015.04.001"},{"key":"ref6","first-page":"1","article-title":"On attacking statistical spam filters","author":"wittel","year":"2004","journal-title":"Proc 1st Conf Email and Anti-Spam"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2017.2696365"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/2976749.2978392"},{"key":"ref49","author":"hu","year":"2017","journal-title":"Generating Adversarial Malware Examples for Black-Box Attacks Based on GAN"},{"key":"ref7","first-page":"1","article-title":"Good word attacks on statistical spam filters","author":"lowd","year":"2005","journal-title":"Proc 2nd Conf Email Anti-Spam"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICB.2013.6613006"},{"key":"ref46","first-page":"681","article-title":"The security of latent Dirichlet allocation","author":"mei","year":"2015","journal-title":"Proc Int Conf Artif Intell Statist"},{"key":"ref45","author":"yang","year":"2017","journal-title":"Generative poisoning attack method against neural 
networks"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/JBHI.2014.2344095"},{"key":"ref47","first-page":"1689","article-title":"Is feature selection secure against training data poisoning?","author":"xiao","year":"2015","journal-title":"Proc Int Conf Int Conf Mach Learn (ICML)"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1145\/3133956.3134083"},{"key":"ref41","first-page":"3681","article-title":"Security analysis of online centroid anomaly detection","volume":"13","author":"kloft","year":"2012","journal-title":"J Mach Learn Res"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1145\/3041008.3041012"},{"key":"ref43","first-page":"1467","article-title":"Poisoning attacks against support vector machines","author":"biggio","year":"2012","journal-title":"Proc 29th Int Conf Int Conf Mach Learn (ICML)"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-02300-7_4"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1007\/s10994-017-5663-3"},{"key":"ref71","first-page":"601","article-title":"Stealing machine learning models via prediction APIs","author":"tram\u00e8r","year":"2016","journal-title":"Proc 25th Usenix Security Symp"},{"key":"ref70","first-page":"17","article-title":"Privacy in pharmacogenetics: An end-to-end case study of personalized warfarin dosing","author":"fredrikson","year":"2014","journal-title":"Proc Usenix Secur Symp"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1145\/2046684.2046692"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1145\/1644893.1644895"},{"key":"ref74","doi-asserted-by":"crossref","first-page":"3525","DOI":"10.1007\/978-0-387-39940-9_466","article-title":"What-if analysis","author":"rizzi","year":"2009","journal-title":"Encyclopedia of Database 
Systems"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1145\/1654988.1654990"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1007\/s13042-010-0007-7"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-21557-5_37"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1145\/2484313.2484327"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/CISDA.2007.368148"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.14722\/ndss.2016.23115"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2014.20"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1145\/3052973.3053009"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.49"},{"key":"ref66","author":"mopuri","year":"2017","journal-title":"Fast feature fool A data independent approach to universal adversarial perturbations"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.17"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.41"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/CSF.2016.32"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2016.2577036"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2017.01.026"},{"key":"ref109","first-page":"201","article-title":"CryptoNets: Applying neural networks to encrypted data with high throughput and accuracy","author":"dowlin","year":"2016","journal-title":"Proc 33rd Int Conf Mach Learn"},{"key":"ref95","author":"bhagoji","year":"2017","journal-title":"Enhancing robustness of machine learning systems via data transformations"},{"key":"ref108","first-page":"81","article-title":"Investigation on distributed K-means clustering algorithm of homomorphic encryption","volume":"2","author":"yao","year":"2017","journal-title":"Computer Technology and Development"},{"key":"ref94","author":"carlini","year":"2016","journal-title":"Defensive distillation is not robust to adversarial 
examples"},{"key":"ref107","author":"aslett","year":"2015","journal-title":"Encrypted statistical machine learning New privacy preserving methods"},{"key":"ref93","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2016.41"},{"key":"ref106","first-page":"643","article-title":"Multiparty Computation from Somewhat Homomorphic Encryption","author":"damg\u00e5rd","year":"2012","journal-title":"Proc 32nd Annu Cryptol Conf Adv Cryptol (CRYPTO)"},{"key":"ref92","author":"gu","year":"2015","journal-title":"Towards deep neural network architectures robust to adversarial examples"},{"key":"ref105","doi-asserted-by":"publisher","DOI":"10.1109\/ICME.2014.6890141"},{"key":"ref91","author":"goodfellow","year":"2015","journal-title":"Explaining and Harnessing Adversarial Examples"},{"key":"ref104","first-page":"918","article-title":"Differentially private Bayesian optimization","author":"kusner","year":"2015","journal-title":"Proc Int Conf Mach Learn (ICML)"},{"key":"ref90","author":"metzen","year":"2017","journal-title":"On detecting adversarial perturbations"},{"key":"ref103","doi-asserted-by":"publisher","DOI":"10.1145\/2976749.2978318"},{"key":"ref102","doi-asserted-by":"publisher","DOI":"10.29012\/jpc.v4i1.612"},{"key":"ref111","doi-asserted-by":"publisher","DOI":"10.1145\/1989323.1989345"},{"key":"ref110","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2016.2606428"},{"key":"ref98","author":"tram\u00e8r","year":"2018","journal-title":"Ensemble adversarial training Attacks and defenses"},{"key":"ref99","author":"abbasi","year":"2017","journal-title":"Robustness to adversarial examples through an ensemble of specialists"},{"key":"ref96","author":"grosse","year":"2017","journal-title":"On the (statistical) detection of adversarial examples"},{"key":"ref97","author":"sengupta","year":"2017","journal-title":"MTDeep Boosting the security of deep neural nets against adversarial attacks with moving target 
defense"},{"key":"ref10","author":"papernot","year":"2016","journal-title":"Practical black-box attacks against machine learning"},{"key":"ref11","first-page":"513","article-title":"Hidden voice commands","author":"carlini","year":"2016","journal-title":"Proc 25th Usenix Security Symp"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/1014052.1014066"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1145\/1081870.1081950"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/1128817.1128824"},{"key":"ref15","author":"amodei","year":"2016","journal-title":"Concrete problems in ai safety"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.1109\/TDSC.2017.2700270"},{"key":"ref16","author":"papernot","year":"2016","journal-title":"Towards the science of security and privacy in machine learning"},{"key":"ref81","author":"laishram","year":"2016","journal-title":"Curie A method for protecting SVM classifier from poisoning attack"},{"key":"ref17","article-title":"A survey of machine learning for big data processing","volume":"2016","author":"qiu","year":"2016","journal-title":"EURASIP J Adv Signal Process"},{"key":"ref84","first-page":"1489","article-title":"Convex learning with invariances","author":"teo","year":"2007","journal-title":"Proc 20th Int Conf Neural Inf Process Syst (NIPS)"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/s10994-010-5188-5"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1145\/1143844.1143889"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2013.57"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.1007\/978-0-387-88735-7_2"},{"key":"ref89","author":"feinman","year":"2017","journal-title":"Detecting adversarial samples from artifacts"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1145\/2020408.2020495"},{"key":"ref86","first-page":"2617","article-title":"Static prediction games for adversarial learning 
problems","volume":"13","author":"br\u00fcckner","year":"2012","journal-title":"J Mach Learn Res"},{"key":"ref87","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2016.2593488"},{"key":"ref88","author":"xu","year":"2017","journal-title":"Feature squeezing Detecting adversarial examples in deep neural networks"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/8274985\/08290925.pdf?arnumber=8290925","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,14]],"date-time":"2022-08-14T08:28:20Z","timestamp":1660465700000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/8290925\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2018]]},"references-count":111,"URL":"https:\/\/doi.org\/10.1109\/access.2018.2805680","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2018]]}}}