{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,4]],"date-time":"2025-12-04T10:00:18Z","timestamp":1764842418898},"reference-count":47,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"5","license":[{"start":{"date-parts":[[2021,9,1]],"date-time":"2021-09-01T00:00:00Z","timestamp":1630454400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Dependable and Secure Comput."],"published-print":{"date-parts":[[2021,9,1]]},"DOI":"10.1109\/tdsc.2020.3021008","type":"journal-article","created":{"date-parts":[[2020,9,1]],"date-time":"2020-09-01T20:39:36Z","timestamp":1598992776000},"page":"2074-2087","source":"Crossref","is-referenced-by-count":26,"title":["Man-in-the-Middle Attacks Against Machine Learning Classifiers Via Malicious Generative Models"],"prefix":"10.1109","volume":"18","author":[{"given":"Derui","family":"Wang","sequence":"first","affiliation":[]},{"given":"Chaoran","family":"Li","sequence":"additional","affiliation":[]},{"given":"Sheng","family":"Wen","sequence":"additional","affiliation":[]},{"given":"Surya","family":"Nepal","sequence":"additional","affiliation":[]},{"given":"Yang","family":"Xiang","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1145\/2976749.2978392"},{"key":"ref38","article-title":"Learning a driving simulator","author":"santana","year":"2016"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01258-8_10"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2018.2861800"},{"key":"ref31","first-page":"2574","article-title":"Deepfool: A simple and accurate method to fool deep neural networks","author":"moosavidezfooli","year":"2016","journal-title":"Proc IEEE Conf Comput Vis Pattern Recognit"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.17"},{"key":"ref37","first-page":"729","article-title":"Intrusion detection in the cloud","author":"roschke","year":"2009","journal-title":"Proc IEEE Int Conf Dependable Auton Secure Comput"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00263"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/EuroSP.2016.36"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1145\/3052973.3053009"},{"key":"ref10","article-title":"Diagnosing and enhancing vae models","author":"dai","year":"2019"},{"key":"ref40","article-title":"Intriguing properties of neural networks","author":"szegedy","year":"2013"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4419-5906-5_324"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/WACV45572.2020.9093393"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-2006"},{"key":"ref14","article-title":"A rotation and a translation suffice: Fooling cnns with simple transformations","author":"engstrom","year":"2017"},{"key":"ref15","article-title":"Explaining and harnessing adversarial examples","author":"goodfellow","year":"2015"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-66399-9_4"},{"key":"ref17","first-page":"497","article-title":"Terminal brain damage: Exposing the graceless degradation in deep neural networks under hardware fault attacks","author":"hong","year":"2019","journal-title":"Proc 28th USENIX Secur Symp"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2018.00212"},{"key":"ref19","first-page":"2142","article-title":"Black-box adversarial attacks with limited queries and information","author":"ilyas","year":"2018","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref28","article-title":"Delving into transferable adversarial examples and black-box attacks","author":"liu","year":"2016"},{"key":"ref4","first-page":"284","article-title":"Synthesizing robust adversarial examples","author":"athalye","year":"0"},{"key":"ref27","first-page":"3727","article-title":"Zeroth-order stochastic variance reduction for nonconvex optimization","author":"liu","year":"2018","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref3","first-page":"274","article-title":"Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples","author":"athalye","year":"2018","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref6","first-page":"1","article-title":"Neural photo editing with introspective adversarial networks","author":"brock","year":"2017","journal-title":"Proc 5th Int Conf Learn Representations"},{"key":"ref29","article-title":"Towards deep learning models resistant to adversarial attacks","author":"madry","year":"2018","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref5","first-page":"2938","article-title":"How to backdoor federated learning","author":"bagdasaryan","year":"0"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.49"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1145\/3128572.3140444"},{"key":"ref2","first-page":"1","article-title":"Variational autoencoder based anomaly detection using reconstruction probability","volume":"2","author":"an","year":"2015","journal-title":"Special lecture"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2018.2852738"},{"key":"ref1","article-title":"Genattack: Practical black-box attacks with gradient-free optimization","author":"alzantot","year":"2018"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/543"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1145\/3243734.3243757"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1145\/1390156.1390294"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/SPW.2018.00014"},{"key":"ref47","first-page":"234","article-title":"DaST: Data-free substitute training for adversarial attacks","author":"zhou","year":"2020","journal-title":"Proc IEEE Conf Comput Vis and Pattern Recog"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D17-1215"},{"key":"ref42","article-title":"Ensemble adversarial training: Attacks and defenses","author":"tram\u00e8r","year":"2017"},{"key":"ref24","first-page":"1558","article-title":"Autoencoding beyond pixels using a learned similarity metric","author":"larsen","year":"2016","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref41","article-title":"Wasserstein auto-encoders","author":"tolstikhin","year":"2018","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref23","article-title":"Adversarial examples in the physical world","author":"kurakin","year":"2016"},{"key":"ref44","article-title":"Split learning for health: Distributed deep learning without sharing raw patient data","author":"vepakomma","year":"2018"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.3390\/s19163445"},{"key":"ref43","article-title":"The space of transferable adversarial examples","author":"tram\u00e8r","year":"2017"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/3097983.3098077"}],"container-title":["IEEE Transactions on Dependable and Secure Computing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8858\/9523876\/09183938.pdf?arnumber=9183938","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,1,12]],"date-time":"2022-01-12T16:06:33Z","timestamp":1642003593000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9183938\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,9,1]]},"references-count":47,"journal-issue":{"issue":"5"},"URL":"https:\/\/doi.org\/10.1109\/tdsc.2020.3021008","relation":{},"ISSN":["1545-5971","1941-0018","2160-9209"],"issn-type":[{"value":"1545-5971","type":"print"},{"value":"1941-0018","type":"electronic"},{"value":"2160-9209","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,9,1]]}}}