{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,19]],"date-time":"2025-09-19T10:50:37Z","timestamp":1758279037380,"version":"3.37.3"},"reference-count":48,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,5,1]],"date-time":"2022-05-01T00:00:00Z","timestamp":1651363200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,5,1]],"date-time":"2022-05-01T00:00:00Z","timestamp":1651363200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000006","name":"Office of Naval Research","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100000006","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,5]]},"DOI":"10.1109\/spw54247.2022.9833895","type":"proceedings-article","created":{"date-parts":[[2022,7,25]],"date-time":"2022-07-25T20:14:47Z","timestamp":1658780087000},"page":"73-79","source":"Crossref","is-referenced-by-count":7,"title":["Ares: A System-Oriented Wargame Framework for Adversarial ML"],"prefix":"10.1109","author":[{"given":"Farhan","family":"Ahmed","sequence":"first","affiliation":[{"name":"Stony Brook University"}]},{"given":"Pratik","family":"Vaishnavi","sequence":"additional","affiliation":[{"name":"Stony Brook University"}]},{"given":"Kevin","family":"Eykholt","sequence":"additional","affiliation":[{"name":"IBM Research"}]},{"given":"Amir","family":"Rahmati","sequence":"additional","affiliation":[{"name":"Stony Brook University"}]}],"member":"263","reference":[{"key":"ref39","article-title":"Empir: Ensembles of mixed precision deep networks for increased robustness against adversarial attacks","author":"sen","year":"2019","journal-title":"International Conference on Learning Representations (ICLR)"},{"key":"ref38","article-title":"Improving adversarial robustness via promoting ensemble diversity","author":"pang","year":"2019","journal-title":"International Conference on Machine Learning (ICML)"},{"key":"ref33","article-title":"Provably robust deep learning via adversarially trained smoothed classifiers","author":"salman","year":"2019","journal-title":"Advances in Neural Information Processing Systems (NIPS)"},{"key":"ref32","article-title":"Certified adversarial robustness via randomized smoothing","author":"cohen","year":"2019","journal-title":"International Conference on Machine Learning (ICML)"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00059"},{"key":"ref30","article-title":"Theoretically principled trade-off between robustness and accuracy","author":"zhang","year":"2019","journal-title":"International Conference on Machine Learning (ICML)"},{"key":"ref37","article-title":"Error correcting output codes improve probability estimation and adversarial robustness of deep neural networks","author":"verma","year":"2019","journal-title":"Advances in Neural Information Processing Systems (NIPS)"},{"key":"ref36","article-title":"Robustness to adversarial examples through an ensemble of specialists","author":"abbasi","year":"2017","journal-title":"arXiv preprint arXiv 1702 06156"},{"key":"ref35","article-title":"Ensemble adversarial training: Attacks and defenses","author":"tram\u00e8r","year":"2018","journal-title":"International Conference on Learning Representations (ICLR)"},{"key":"ref34","article-title":"Macer: Attack-free and scalable robust training via maximizing certified radius","author":"zhai","year":"2020","journal-title":"International Conference on Learning Representations (ICLR)"},{"article-title":"Robustness (python library)","year":"2019","author":"engstrom","key":"ref10"},{"key":"ref40","article-title":"Adversarial example defenses: ensembles of weak defenses are not strong","author":"he","year":"2017","journal-title":"7th USENIX conference on Offensive Technologies"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.21105\/joss.02607"},{"key":"ref12","article-title":"Adversarial robustness toolbox v1.2.0","author":"nicolae","year":"2018","journal-title":"arXiv preprint arXiv 1807 01069"},{"article-title":"Adversarial attacks and defences: A survey","year":"2018","author":"chakraborty","key":"ref13"},{"key":"ref14","article-title":"Adversarial machine learning at scale","author":"kurakin","year":"2016","journal-title":"arXiv preprint arXiv 1611 01236"},{"key":"ref15","article-title":"Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks","author":"croce","year":"2020","journal-title":"International Conference on Machine Learning (ICML)"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/SP40000.2020.00073"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/3052973.3053009"},{"key":"ref18","article-title":"Decision-based adversarial attacks: Reliable attacks against black-box machine learning models","author":"brendel","year":"2018","journal-title":"International Conference on Learning Representations (ICLR)"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00668"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00191"},{"key":"ref4","article-title":"Towards deep neural network architectures robust to adversarial examples","author":"gu","year":"2015","journal-title":"arXiv preprint arXiv 1412 5068"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1145\/3133956.3134057"},{"key":"ref3","article-title":"Countering adversarial images using input transformations","author":"guo","year":"2017","journal-title":"arXiv preprint arXiv 1711 00540"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00175"},{"key":"ref29","article-title":"Towards deep learning models resistant to adversarial attacks","author":"madry","year":"2018","journal-title":"International Conference on Learning Representations (ICLR)"},{"key":"ref5","article-title":"Improving adversarial robustness by data-specific discretization","author":"chen","year":"2018","journal-title":"arXiv preprint arXiv 1805 07816"},{"key":"ref8","article-title":"Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples","author":"athalye","year":"2018","journal-title":"International Conference on Machine Learning (ICML)"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/SPW50608.2020.00028"},{"key":"ref2","article-title":"Explaining and harnessing adversarial examples","author":"goodfellow","year":"2014","journal-title":"International Conference on Learning Representations (ICLR)"},{"key":"ref9","article-title":"Technical report on the cleverhans v2.1.0 adversarial examples library","author":"papernot","year":"2018","journal-title":"arXiv preprint arXiv 1610 00768"},{"key":"ref1","article-title":"Intriguing properties of neural networks","author":"szegedy","year":"2014","journal-title":"International Conference on Learning Representations (ICLR)"},{"key":"ref46","article-title":"Trs: Transferability reduced ensemble via encouraging gradient diversity and model smoothness","author":"yang","year":"2021","journal-title":"arXiv preprint arXiv 2104 00671"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00444"},{"key":"ref45","article-title":"[p]y[t]orch: An imperative style, high-performance deep learning library","author":"paszke","year":"2019","journal-title":"Advances in Neural Information Processing Systems (NIPS)"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1145\/3385003.3410925"},{"key":"ref22","article-title":"Minimally distorted adversarial examples with a fast adaptive boundary attack","author":"croce","year":"2020","journal-title":"International Conference on Machine Learning (ICML)"},{"key":"ref47","article-title":"Graddiv: Adversarial robustness of randomized neural networks via gradient diversity regularization","author":"lee","year":"2021","journal-title":"arXiv preprint arXiv 2107 02425"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/SP40000.2020.00045"},{"key":"ref42","article-title":"Mtdeep: boosting the security of deep neural nets against adversarial attacks with moving target defense","author":"sengupta","year":"2018","journal-title":"Workshops at the Thirtieth AAAI Conference on Artificial Intelligence"},{"year":"0","key":"ref24","article-title":"Google cloud vision"},{"key":"ref41","article-title":"On adaptive attacks to adversarial example defenses","author":"tram\u00e8r","year":"2020","journal-title":"arXiv preprint arXiv 2002 05155"},{"year":"0","key":"ref23","article-title":"Azure computer vision"},{"article-title":"Openai gym","year":"2016","author":"brockman","key":"ref44"},{"year":"0","key":"ref26","article-title":"Clarifai"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1145\/3318216.3363338"},{"year":"0","key":"ref25","article-title":"Aws rekognition"}],"event":{"name":"2022 IEEE Security and Privacy Workshops (SPW)","start":{"date-parts":[[2022,5,22]]},"location":"San Francisco, CA, USA","end":{"date-parts":[[2022,5,26]]}},"container-title":["2022 IEEE Security and Privacy Workshops (SPW)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9833855\/9833856\/09833895.pdf?arnumber=9833895","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,15]],"date-time":"2022-08-15T20:02:39Z","timestamp":1660593759000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9833895\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,5]]},"references-count":48,"URL":"https:\/\/doi.org\/10.1109\/spw54247.2022.9833895","relation":{},"subject":[],"published":{"date-parts":[[2022,5]]}}}