{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,26]],"date-time":"2026-03-26T06:53:40Z","timestamp":1774508020755,"version":"3.50.1"},"reference-count":268,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"5","license":[{"start":{"date-parts":[[2022,5,1]],"date-time":"2022-05-01T00:00:00Z","timestamp":1651363200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,5,1]],"date-time":"2022-05-01T00:00:00Z","timestamp":1651363200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,5,1]],"date-time":"2022-05-01T00:00:00Z","timestamp":1651363200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"National Key Research and Development Program of China","award":["2020AAA0140001"],"award-info":[{"award-number":["2020AAA0140001"]}]},{"DOI":"10.13039\/501100004826","name":"Beijing Natural Science Foundation","doi-asserted-by":"publisher","award":["JQ18011"],"award-info":[{"award-number":["JQ18011"]}],"id":[{"id":"10.13039\/501100004826","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U1836211"],"award-info":[{"award-number":["U1836211"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61902395"],"award-info":[{"award-number":["61902395"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"National Top-notch Youth Talents Program of China"},{"DOI":"10.13039\/501100004739","name":"Youth Innovation Promotion 
Association of the Chinese Academy of Sciences","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100004739","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100005090","name":"Beijing Nova Program","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100005090","id-type":"DOI","asserted-by":"publisher"}]},{"name":"National Frontier Science and Technology Innovation","award":["YJKYYQ20170070"],"award-info":[{"award-number":["YJKYYQ20170070"]}]},{"name":"Beijing Academy of Artificial Intelligence"},{"name":"CCF-Tencent Open Fund"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Software Eng."],"published-print":{"date-parts":[[2022,5,1]]},"DOI":"10.1109\/tse.2020.3034721","type":"journal-article","created":{"date-parts":[[2020,11,9]],"date-time":"2020-11-09T21:36:27Z","timestamp":1604957787000},"page":"1743-1770","source":"Crossref","is-referenced-by-count":74,"title":["Towards Security Threats of Deep Learning Systems: A Survey"],"prefix":"10.1109","volume":"48","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1238-1106","authenticated-orcid":false,"given":"Yingzhe","family":"He","sequence":"first","affiliation":[{"name":"Chinese Academy of Sciences, Institute of Information Engineering, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6388-2571","authenticated-orcid":false,"given":"Guozhu","family":"Meng","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences, Institute of Information Engineering, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5624-2987","authenticated-orcid":false,"given":"Kai","family":"Chen","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences, Institute of Information Engineering, Beijing, China"}]},{"given":"Xingbo","family":"Hu","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences, Institute of Information Engineering, Beijing, 
China"}]},{"given":"Jinwen","family":"He","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences, Institute of Information Engineering, Beijing, China"}]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/2976749.2978318"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/3338906.3338937"},{"key":"ref12","article-title":"Defense against universal adversarial perturbations","author":"Akhtar","year":"2017","journal-title":"CoRR"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2018.2807385"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/SPW.2018.00020"},{"key":"ref15","article-title":"Adversarial examples in the physical world","author":"Kurakin","year":"2016","journal-title":"CoRR"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10237"},{"key":"ref17","article-title":"Concrete problems in AI safety","author":"Amodei","year":"2016","journal-title":"CoRR"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/3314221.3314614"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1504\/IJSN.2015.071829"},{"key":"ref20","first-page":"284","article-title":"Synthesizing robust adversarial examples","volume-title":"Proc. 35th Int. Conf. Mach. Learn.","author":"Athalye"},{"key":"ref21","first-page":"622","article-title":"Adaptive watermarks: A concept drift-based approach for predicting event-time progress in data streams","volume-title":"Proc. 22nd Int. Conf. Extending Database Technol.","author":"Awad"},{"key":"ref22","article-title":"Security and privacy issues in deep learning","author":"Bae","year":"2018","journal-title":"CoRR"},{"key":"ref23","first-page":"2938","article-title":"How to backdoor federated learning","volume-title":"Proc. 23rd Int. Conf. Artif. Intell. 
Statist.","author":"Bagdasaryan"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11672"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/3128572.3140450"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/s10994-010-5188-5"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/FOCS.2014.56"},{"key":"ref28","article-title":"What is structured data? webopedia definition","author":"Beal","year":"2018"},{"key":"ref29","first-page":"634","article-title":"Analyzing federated learning through an adversarial lens","volume-title":"Proc. 36th Int. Conf. Mach. Learn.","author":"Bhagoji"},{"key":"ref30","article-title":"Unrestricted adversarial examples via semantic manipulation","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Bhattad"},{"key":"ref31","article-title":"Poisoning attacks against support vector machines","volume-title":"Proc. 29th Int. Conf. Mach. Learn.","author":"Biggio"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1145\/2517312.2517321"},{"key":"ref33","first-page":"695","article-title":"Adversarial attacks on node embeddings via graph poisoning","volume-title":"Proc. 36th Int. Conf. Mach. Learn.","author":"Bojchevski"},{"key":"ref34","article-title":"Discrimination, artificial intelligence, and algorithmic decision-making","author":"Borgesius","year":"2018"},{"key":"ref35","first-page":"171","article-title":"Nash equilibria of static prediction games","volume-title":"Proc. 23rd Annu. Conf. Neural Inf. Process. Syst.","author":"Br\u00fcckner"},{"key":"ref36","first-page":"831","article-title":"Adversarial examples from computational constraints","volume-title":"Proc. 36th Int. Conf. Mach. Learn.","author":"Bubeck"},{"key":"ref37","article-title":"Thermometer encoding: One hot way to resist adversarial examples","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Buckman"},{"key":"ref38","first-page":"77","article-title":"Gender shades: Intersectional accuracy disparities in commercial gender classification","volume-title":"Proc. Conf. Fairness Accountability Transparency","author":"Buolamwini"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1145\/3041008.3041012"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1145\/1970392.1970395"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1145\/3134600.3134606"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2015.35"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.49"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/SPW.2018.00009"},{"key":"ref45","article-title":"Debugging machine learning tasks","author":"Chakarov","year":"2016","journal-title":"CoRR"},{"key":"ref46","first-page":"289","article-title":"Privacy-preserving logistic regression","volume-title":"Proc. 22nd Annu. Conf. Neural Inf. Process. Syst.","author":"Chaudhuri"},{"key":"ref47","first-page":"1122","article-title":"Robust decision trees against adversarial examples","volume-title":"Proc. 36th Int. Conf. Mach. Learn.","author":"Chen"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11302"},{"key":"ref49","article-title":"Robust high dimensional sparse regression and matching pursuit","author":"Chen","year":"2013","journal-title":"CoRR"},{"key":"ref50","article-title":"Query-efficient hard-label black-box attack: An optimization-based approach","volume-title":"Proc. 7th Int. Conf. Learn. Representations","author":"Cheng"},{"key":"ref51","first-page":"10932","article-title":"Improving black-box adversarial attacks with a transfer-based prior","volume-title":"Proc. Annu. Conf. Neural Inf. Process. 
Systems","author":"Cheng"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/p19-1425"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1145\/3319535.3345660"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2018.8489592"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N19-142"},{"key":"ref56","first-page":"1646","article-title":"Generalized no free lunch theorem for adversarial robustness","volume-title":"Proc. 36th Int. Conf. Mach. Learn.","author":"Dohmatob"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00957"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00444"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1145\/3338906.3338954"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1007\/11761679_29"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1007\/11681878_14"},{"key":"ref62","first-page":"1802","article-title":"Exploring the landscape of spatial robustness","volume-title":"Proc. 36th Int. Conf. Mach. Learn.","author":"Engstrom"},{"key":"ref63","first-page":"1605","article-title":"Local model poisoning attacks to byzantine-robust federated learning","volume-title":"Proc. 29th USENIX Secur. Symp.","author":"Fang"},{"key":"ref64","first-page":"253","article-title":"Robust logistic regression and classification","volume-title":"Proc. Annu. Conf. Neural Inf. Process. 
Syst.","author":"Feng"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1145\/2810103.2813677"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1145\/3243734.3243834"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/SPW.2018.00016"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1145\/3359789.3359790"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2018.00058"},{"key":"ref70","first-page":"201","article-title":"CryptoNets: Applying neural networks to encrypted data with high throughput and accuracy","volume-title":"Proc. 33rd Int. Conf. Mach. Learn.","author":"Gilad-Bachrach"},{"key":"ref71","article-title":"Adversarial policies: Attacking deep reinforcement learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Gleave"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/649"},{"key":"ref73","article-title":"Crafting adversarial examples for speech paralinguistics applications","author":"Gong","year":"2017","journal-title":"CoRR"},{"key":"ref74","article-title":"Explaining and harnessing adversarial examples","author":"Goodfellow","year":"2014","journal-title":"CoRR"},{"key":"ref75","article-title":"Towards deep neural network architectures robust to adversarial examples","author":"Gu","year":"2014","journal-title":"CoRR"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2909068"},{"key":"ref77","article-title":"Low frequency adversarial perturbation","author":"Guo","year":"2018","journal-title":"CoRR"},{"key":"ref78","article-title":"Countering adversarial images using input transformations","author":"Guo","year":"2017","journal-title":"CoRR"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1145\/3236024.3264835"},{"key":"ref80","first-page":"3820","article-title":"Subspace attack: Exploiting promising subspaces for query-efficient black-box attacks","volume-title":"Proc. Annu. Conf. Neural Inf. Process. 
Syst.","author":"Guo"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.14722\/ndss.2019.23064"},{"key":"ref82","article-title":"Amazon built an ai tool to hire people but had to shut it down because it was discriminating against women","author":"Hamilton"},{"key":"ref83","first-page":"555","article-title":"Learning privately from multiparty data","volume-title":"Proc. 33rd Int. Conf. Mach. Learn.","author":"Hamm"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1145\/1629080.1629082"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1109\/SPW.2018.00015"},{"key":"ref86","article-title":"LOGAN: Evaluating privacy leakage of generative models using generative adversarial networks","author":"Hayes","year":"2017","journal-title":"CoRR"},{"key":"ref87","article-title":"Decision boundary analysis of adversarial examples","volume-title":"Proc. Int. Conf. Learn. Representations","author":"He"},{"key":"ref88","doi-asserted-by":"publisher","DOI":"10.1145\/3359789.3359824"},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.1145\/3133956.3134012"},{"key":"ref90","first-page":"245","article-title":"Black-box attacks against RNN based malware detection algorithms","volume-title":"Proc. Workshops 32nd AAAI Conf. Artif. 
Intell.","author":"Hu"},{"key":"ref91","article-title":"Generating adversarial malware examples for black-box attacks based on GAN","author":"Hu","year":"2017","journal-title":"CoRR"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.1145\/3195970.3196105"},{"key":"ref93","article-title":"Learning with a strong adversary","author":"Huang","year":"2015","journal-title":"CoRR"},{"key":"ref94","article-title":"Adversarial attacks on neural network policies","author":"Huang","year":"2017","journal-title":"CoRR"},{"key":"ref95","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-40667-1_20"},{"key":"ref96","doi-asserted-by":"publisher","DOI":"10.1016\/j.cosrev.2020.100270"},{"key":"ref97","article-title":"Efficient deep learning on multi-source private data","author":"Hynes","year":"2018","journal-title":"CoRR"},{"key":"ref98","article-title":"Query-efficient black-box adversarial examples","author":"Ilyas","year":"2017","journal-title":"CoRR"},{"key":"ref99","first-page":"2142","article-title":"Black-box adversarial attacks with limited queries and information","volume-title":"Proc. 35th Int. Conf. Mach. Learn.","author":"Ilyas"},{"key":"ref100","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2018.00057"},{"key":"ref101","doi-asserted-by":"publisher","DOI":"10.1109\/TSE.2017.2778711"},{"key":"ref102","doi-asserted-by":"publisher","DOI":"10.1145\/3134600.3134635"},{"key":"ref103","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00283"},{"key":"ref104","doi-asserted-by":"publisher","DOI":"10.1145\/3319535.3363201"},{"key":"ref105","doi-asserted-by":"publisher","DOI":"10.1145\/3243734.3243837"},{"key":"ref106","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/E17-2068"},{"key":"ref107","article-title":"PRADA: protecting against DNN model stealing attacks","author":"Juuti","year":"2018","journal-title":"CoRR"},{"key":"ref108","first-page":"1651","article-title":"GAZELLE: A low latency framework for secure neural network inference","volume-title":"Proc. 
27th USENIX Secur. Symp.","author":"Juvekar"},{"key":"ref109","doi-asserted-by":"publisher","DOI":"10.1145\/2517312.2517320"},{"key":"ref110","doi-asserted-by":"publisher","DOI":"10.4204\/eptcs.257.3"},{"key":"ref111","article-title":"Model extraction warning in MLaaS paradigm","author":"Kesarwani","year":"2017","journal-title":"CoRR"},{"key":"ref112","first-page":"25.1","article-title":"Private convex optimization for empirical risk minimization with applications to high-dimensional regression","volume-title":"Proc. 25th Annu. Conf. Learn. Theory","author":"Kifer"},{"key":"ref113","doi-asserted-by":"publisher","DOI":"10.1109\/ICSE.2019.00108"},{"key":"ref114","doi-asserted-by":"publisher","DOI":"10.1109\/SPW.2018.00014"},{"key":"ref115","article-title":"Enhancing transformation-based defenses against adversarial attacks with a distribution classifier","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Kou"},{"key":"ref116","article-title":"Deceiving end-to-end deep learning malware detectors using adversarial examples","author":"Kreuk","year":"2018"},{"key":"ref117","article-title":"CIFAR dataset","author":"Krizhevsky","year":"2019"},{"key":"ref118","article-title":"Adversarial machine learning at scale","author":"Kurakin","year":"2016","journal-title":"CoRR"},{"key":"ref119","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.249"},{"key":"ref120","article-title":"Mnist dataset","author":"LeCun","year":"2017"},{"key":"ref121","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2019.00044"},{"key":"ref122","article-title":"Defending against model stealing attacks using deceptive perturbations","author":"Lee","year":"2018","journal-title":"CoRR"},{"key":"ref123","doi-asserted-by":"publisher","DOI":"10.14722\/ndss.2019.23138"},{"key":"ref124","first-page":"3896","article-title":"Adversarial camera stickers: A physical camera-based attack on deep learning systems","volume-title":"Proc. 36th Int. Conf. Mach. 
Learn.","author":"Li"},{"key":"ref125","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/654"},{"key":"ref126","first-page":"1200","article-title":"Query-efficient black-box attack by active learning","volume-title":"Proc. IEEE Int. Conf. Data Mining","author":"Li"},{"key":"ref127","doi-asserted-by":"publisher","DOI":"10.14722\/ndss.2019.23202"},{"key":"ref128","article-title":"Implicit bias of gradient descent based adversarial training on separable data","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Li"},{"key":"ref129","doi-asserted-by":"publisher","DOI":"10.1109\/TDSC.2018.2874243"},{"key":"ref130","article-title":"Ai trends: Machine learning as a service (MLaaS)","author":"Light","year":"2018"},{"key":"ref131","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/415"},{"key":"ref132","doi-asserted-by":"publisher","DOI":"10.1145\/3128572.3140447"},{"key":"ref133","first-page":"4072","article-title":"On certifying non-uniform bounds against adversarial attacks","volume-title":"Proc. 36th Int. Conf. Mach. Learn.","author":"Liu"},{"key":"ref134","first-page":"4042","article-title":"Data poisoning attacks on stochastic bandits","volume-title":"Proc. 36th Int. Conf. Mach. Learn.","author":"Liu"},{"key":"ref135","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00303"},{"key":"ref136","doi-asserted-by":"publisher","DOI":"10.1145\/3133956.3134056"},{"key":"ref137","article-title":"Generative model: Membership attack, generalization and diversity","author":"Liu","year":"2018","journal-title":"CoRR"},{"key":"ref138","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2018.2805680"},{"key":"ref139","first-page":"9777","article-title":"A unified framework for data poisoning attack to graph-based semi-supervised learning","volume-title":"Proc. Annu. Conf. Neural Inf. Process. 
Syst.","author":"Liu"},{"key":"ref140","doi-asserted-by":"publisher","DOI":"10.1145\/3319535.3363216"},{"key":"ref141","doi-asserted-by":"publisher","DOI":"10.14722\/ndss.2018.23291"},{"key":"ref142","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00499"},{"key":"ref143","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00095"},{"key":"ref144","article-title":"Understanding membership inferences on well-generalized learning models","author":"Long","year":"2018","journal-title":"CoRR"},{"key":"ref145","article-title":"Trusted computing and provenance: Better together","volume-title":"Proc. 2nd Workshop Theory Practice Provenance","author":"Lyle"},{"key":"ref146","doi-asserted-by":"publisher","DOI":"10.1145\/3238147.3238202"},{"key":"ref147","doi-asserted-by":"publisher","DOI":"10.14722\/ndss.2019.23415"},{"key":"ref148","article-title":"Characterizing adversarial subspaces using local intrinsic dimensionality","author":"Ma","year":"2018","journal-title":"CoRR"},{"key":"ref149","article-title":"Towards deep learning models resistant to adversarial attacks","author":"Madry","year":"2017","journal-title":"CoRR"},{"key":"ref150","first-page":"4274","article-title":"Data poisoning attacks in multi-party learning","volume-title":"Proc. 36th Int. Conf. Mach. 
Learn.","author":"Mahloujifar"},{"key":"ref151","doi-asserted-by":"publisher","DOI":"10.1109\/SPW.2018.00023"},{"key":"ref152","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v29i1.9569"},{"key":"ref153","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2019.00029"},{"key":"ref154","doi-asserted-by":"publisher","DOI":"10.1145\/3133956.3134057"},{"key":"ref155","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.17"},{"key":"ref156","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.282"},{"key":"ref157","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00503"},{"key":"ref158","doi-asserted-by":"publisher","DOI":"10.1145\/3128572.3140451"},{"key":"ref159","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2017.172"},{"key":"ref160","first-page":"12885","article-title":"Cross-domain transferability of adversarial perturbations","volume-title":"Proc. Conf. Neural Inf. Process. Syst.","author":"Naseer"},{"key":"ref161","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2019.00065"},{"key":"ref162","doi-asserted-by":"publisher","DOI":"10.1145\/3243734.3243855"},{"key":"ref163","first-page":"1","article-title":"Exploiting machine learning to subvert your spam filter","volume-title":"Proc. 1st USENIX Workshop Large-Scale Exploits Emergent Threats","author":"Nelson"},{"key":"ref164","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-28954-6_7"},{"key":"ref165","first-page":"619","article-title":"Oblivious multi-party machine learning on trusted processors","volume-title":"Proc. 25th USENIX Secur. Symp.","author":"Ohrimenko"},{"key":"ref166","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-68136-8_8"},{"key":"ref167","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00509"},{"key":"ref168","article-title":"Prediction poisoning: Towards defenses against DNN model stealing attacks","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Orekondy"},{"key":"ref169","article-title":"A hybrid deep learning architecture for privacy-preserving mobile analytics","author":"Ossia","year":"2017","journal-title":"CoRR"},{"key":"ref170","doi-asserted-by":"publisher","DOI":"10.1145\/3338906.3342502"},{"key":"ref171","article-title":"Robust deep learning via reverse cross-entropy training and thresholding test","author":"Pang","year":"2017","journal-title":"CoRR"},{"key":"ref172","article-title":"On the effectiveness of defensive distillation","author":"Papernot","year":"2016","journal-title":"CoRR"},{"key":"ref173","article-title":"Transferability in machine learning: From phenomena to black-box attacks using adversarial samples","author":"Papernot","year":"2016","journal-title":"CoRR"},{"key":"ref174","doi-asserted-by":"publisher","DOI":"10.1145\/3052973.3053009"},{"key":"ref175","doi-asserted-by":"publisher","DOI":"10.1109\/EuroSP.2016.36"},{"key":"ref176","doi-asserted-by":"publisher","DOI":"10.1109\/EuroSP.2018.00035"},{"key":"ref177","doi-asserted-by":"publisher","DOI":"10.1109\/MILCOM.2016.7795300"},{"key":"ref178","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2016.41"},{"key":"ref179","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178304"},{"key":"ref180","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/660"},{"key":"ref181","doi-asserted-by":"publisher","DOI":"10.1109\/TIFS.2017.2787987"},{"key":"ref182","article-title":"Privacy-preserving deep learning for any activation function","author":"Phong","year":"2018","journal-title":"CoRR"},{"key":"ref183","doi-asserted-by":"publisher","DOI":"10.1145\/3234150"},{"key":"ref184","doi-asserted-by":"publisher","DOI":"10.14722\/ndss.2018.23183"},{"key":"ref185","first-page":"14004","article-title":"Defending neural backdoors via generative distribution modeling","volume-title":"Proc. Annu. Conf. Neural Inf. Process. 
Syst.","author":"Qiao"},{"key":"ref186","first-page":"5231","article-title":"Imperceptible, robust, and targeted adversarial examples for automatic speech recognition","volume-title":"Proc. 36th Int. Conf. Mach. Learn.","author":"Qin"},{"key":"ref187","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00249"},{"key":"ref188","first-page":"3752","article-title":"Privacy-preserving classification of personal text messages with secure multi-party computation","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Reich"},{"key":"ref189","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1103"},{"key":"ref190","doi-asserted-by":"publisher","DOI":"10.1109\/SPW.2018.00019"},{"issue":"11","key":"ref191","first-page":"169","article-title":"On data banks and privacy homomorphisms","volume":"4","author":"Rivest","year":"1978","journal-title":"Found. Secure Comput."},{"key":"ref192","doi-asserted-by":"publisher","DOI":"10.23919\/ACC.2019.8814483"},{"key":"ref193","article-title":"Low resource black-box end-to-end attack against state of the art API call based malware classifiers","author":"Rosenberg","year":"2018","journal-title":"CoRR"},{"key":"ref194","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-00470-5_23"},{"key":"ref195","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11504"},{"key":"ref196","first-page":"5558","article-title":"White-box vs black-box: Bayes optimal strategies for membership inference","volume-title":"Proc. 36th Int. Conf. Mach. Learn.","author":"Sablayrolles"},{"key":"ref197","first-page":"1291","article-title":"Updates-leak: Data set inference and reconstruction attacks in online learning","volume-title":"Proc. 29th USENIX Secur. 
Symp.","author":"Salem"},{"key":"ref198","article-title":"Ml-leaks: Model and data independent membership inference attacks and defenses on machine learning models","author":"Salem","year":"2018","journal-title":"CoRR"},{"key":"ref199","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1020"},{"key":"ref200","article-title":"Poison frogs! targeted clean-label poisoning attacks on neural networks","author":"Shafahi","year":"2018","journal-title":"CoRR"},{"key":"ref201","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2018.00211"},{"key":"ref202","doi-asserted-by":"publisher","DOI":"10.1145\/2810103.2813687"},{"key":"ref203","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.41"},{"key":"ref204","first-page":"5809","article-title":"First-order adversarial vulnerability of neural networks and input dimension","volume-title":"Proc. 36th Int. Conf. Mach. Learn.","author":"Simon-Gabriel"},{"key":"ref205","doi-asserted-by":"publisher","DOI":"10.1145\/3133956.3134077"},{"key":"ref206","doi-asserted-by":"publisher","DOI":"10.1145\/3319535.3354211"},{"key":"ref207","doi-asserted-by":"publisher","DOI":"10.1109\/GlobalSIP.2013.6736861"},{"key":"ref208","article-title":"Pixeldefend: Leveraging generative models to understand and defend against adversarial examples","author":"Song","year":"2017","journal-title":"CoRR"},{"key":"ref209","doi-asserted-by":"publisher","DOI":"10.1007\/s10207-018-0399-z"},{"key":"ref210","first-page":"3520","article-title":"Certified defenses for data poisoning attacks","volume-title":"Proc. Annu. Conf. Neural Inf. Process. 
Syst.","author":"Steinhardt"},{"key":"ref211","doi-asserted-by":"publisher","DOI":"10.1145\/3238147.3238172"},{"key":"ref212","article-title":"Can you really backdoor federated learning?","author":"Sun","year":"2019","journal-title":"CoRR"},{"key":"ref213","article-title":"Intriguing properties of neural networks","author":"Szegedy","year":"2013","journal-title":"CoRR"},{"key":"ref214","article-title":"Private empirical risk minimization beyond the worst case: The effect of the constraint set geometry","author":"Talwar","year":"2014","journal-title":"CoRR"},{"key":"ref215","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/663"},{"key":"ref216","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11828"},{"key":"ref217","doi-asserted-by":"publisher","DOI":"10.1145\/3180155.3180220"},{"key":"ref218","article-title":"Ensemble adversarial training: Attacks and defenses","author":"Tram\u00e8r","year":"2017","journal-title":"CoRR"},{"key":"ref219","article-title":"The space of transferable adversarial examples","author":"Tram\u00e8r","year":"2017","journal-title":"CoRR"},{"key":"ref220","doi-asserted-by":"publisher","DOI":"10.5555\/3241094.3241142"},{"key":"ref221","article-title":"Towards demystifying membership inference attacks","author":"Truex","year":"2018","journal-title":"CoRR"},{"key":"ref222","doi-asserted-by":"publisher","DOI":"10.1098\/rsta.2018.0083"},{"key":"ref223","doi-asserted-by":"publisher","DOI":"10.1145\/3147.3165"},{"key":"ref224","article-title":"Securenn: Efficient and private neural network training","volume":"2018","author":"Wagh","year":"2018","journal-title":"IACR Cryptology ePrint Archive"},{"key":"ref225","doi-asserted-by":"publisher","DOI":"10.1145\/3319535.3354206"},{"key":"ref226","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2018.00038"},{"key":"ref227","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2019.00031"},{"key":"ref228","first-page":"6555","article-title":"Improving neural language modeling via adversarial 
training","volume-title":"Proc. 36th Int. Conf. Mach. Learn.","author":"Wang"},{"key":"ref229","first-page":"2719","article-title":"Differentially private empirical risk minimization revisited: Faster and more general","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Wang"},{"key":"ref230","doi-asserted-by":"publisher","DOI":"10.1109\/ICSE.2019.00126"},{"key":"ref231","article-title":"Detecting adversarial samples for deep neural networks through mutation testing","author":"Wang","year":"2018","journal-title":"CoRR"},{"key":"ref232","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00673"},{"key":"ref233","first-page":"1599","article-title":"Formal security analysis of neural networks using symbolic intervals","volume-title":"Proc. 27th USENIX Secur. Symp.","author":"Wang"},{"key":"ref234","doi-asserted-by":"publisher","DOI":"10.1109\/MILCOM.2012.6415609"},{"key":"ref235","article-title":"Data poisoning attacks against online learning","author":"Wang","year":"2018","journal-title":"CoRR"},{"key":"ref236","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/134"},{"key":"ref237","first-page":"6727","article-title":"PROVEN: Verifying robustness of neural networks with a probabilistic approach","volume-title":"Proc. 36th Int. Conf. Mach. Learn.","author":"Weng"},{"key":"ref238","article-title":"Fast is better than free: Revisiting adversarial training","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Wong"},{"key":"ref239","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/669"},{"key":"ref240","article-title":"Graph backdoor","author":"Xi","year":"2020","journal-title":"CoRR"},{"key":"ref241","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/670"},{"key":"ref242","first-page":"1689","article-title":"Is feature selection secure against training data poisoning?","volume-title":"Proc. 32nd Int. Conf. Mach. 
Learn.","author":"Xiao"},{"key":"ref243","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2014.08.081"},{"key":"ref244","first-page":"870","article-title":"Adversarial label flips attack on support vector machines","volume-title":"Proc. 20th Eur. Conf. Artif. Intell.","author":"Xiao"},{"key":"ref245","doi-asserted-by":"publisher","DOI":"10.1109\/SPW.2018.00027"},{"key":"ref246","article-title":"DBA: Distributed backdoor attacks against federated learning","volume-title":"Proc. 8th Int. Conf. Learn. Representations","author":"Xie"},{"key":"ref247","article-title":"Mitigating adversarial effects through randomization","author":"Xie","year":"2017","journal-title":"CoRR"},{"key":"ref248","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00284"},{"key":"ref249","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/671"},{"key":"ref250","doi-asserted-by":"publisher","DOI":"10.1145\/3293882.3330579"},{"key":"ref251","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/550"},{"key":"ref252","doi-asserted-by":"publisher","DOI":"10.1109\/QRS-C.2018.00085"},{"key":"ref253","doi-asserted-by":"publisher","DOI":"10.14722\/ndss.2018.23198"},{"key":"ref254","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/741"},{"key":"ref255","first-page":"7025","article-title":"Me-net: Towards effective adversarial robustness with matrix estimation","volume-title":"Proc. 36th Int. Conf. Mach. 
Learn.","author":"Yang"},{"key":"ref256","doi-asserted-by":"publisher","DOI":"10.1145\/3319535.3354261"},{"key":"ref257","doi-asserted-by":"publisher","DOI":"10.1109\/SFCS.1982.38"},{"key":"ref258","doi-asserted-by":"publisher","DOI":"10.1145\/3319535.3354209"},{"key":"ref259","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00020"},{"key":"ref260","doi-asserted-by":"publisher","DOI":"10.1145\/3133956.3134085"},{"key":"ref261","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2019.00019"},{"key":"ref262","first-page":"49","article-title":"Commandersong: A systematic approach for practical adversarial voice recognition","volume-title":"Proc. 27th USENIX Secur. Symp.","author":"Yuan"},{"key":"ref263","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.540"},{"key":"ref264","doi-asserted-by":"publisher","DOI":"10.1145\/3128572.3140449"},{"key":"ref265","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00443"},{"key":"ref266","first-page":"1829","article-title":"Defense against adversarial attacks using feature scattering-based adversarial training","volume-title":"Proc. Annu. Conf. Neural Inf. Process. 
Syst.","author":"Zhang"},{"key":"ref267","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/674"},{"key":"ref268","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1559"},{"key":"ref269","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2017\/548"},{"key":"ref270","doi-asserted-by":"publisher","DOI":"10.1109\/TSE.2019.2962027"},{"key":"ref271","doi-asserted-by":"publisher","DOI":"10.1145\/3238147.3238187"},{"key":"ref272","article-title":"Privacy-preserving machine learning through data obfuscation","author":"Zhang","year":"2018","journal-title":"CoRR"},{"key":"ref273","doi-asserted-by":"publisher","DOI":"10.1145\/2996758.2996762"},{"key":"ref274","doi-asserted-by":"publisher","DOI":"10.1145\/3319535.3354259"},{"key":"ref275","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2019.00045"},{"key":"ref276","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.319"},{"key":"ref277","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/872"}],"container-title":["IEEE Transactions on Software Engineering"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/32\/9775544\/09252914.pdf?arnumber=9252914","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,9]],"date-time":"2024-01-09T23:23:11Z","timestamp":1704842591000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9252914\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,5,1]]},"references-count":268,"journal-issue":{"issue":"5"},"URL":"https:\/\/doi.org\/10.1109\/tse.2020.3034721","relation":{},"ISSN":["0098-5589","1939-3520","2326-3881"],"issn-type":[{"value":"0098-5589","type":"print"},{"value":"1939-3520","type":"electronic"},{"value":"2326-3881","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,5,1]]}}}