{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,23]],"date-time":"2025-12-23T10:04:06Z","timestamp":1766484246743,"version":"3.41.0"},"publisher-location":"New York, NY, USA","reference-count":97,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,8,29]],"date-time":"2023-08-29T00:00:00Z","timestamp":1693267200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,8,29]]},"DOI":"10.1145\/3600160.3600193","type":"proceedings-article","created":{"date-parts":[[2023,8,9]],"date-time":"2023-08-09T22:54:41Z","timestamp":1691621681000},"page":"1-12","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":9,"title":["SoK: Modeling Explainability in Security Analytics for Interpretability, Trustworthiness, and Usability"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1214-1954","authenticated-orcid":false,"given":"Dipkamal","family":"Bhusal","sequence":"first","affiliation":[{"name":"Department of Software Engineering, Rochester Institute of Technology, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6791-5681","authenticated-orcid":false,"given":"Rosalyn","family":"Shin","sequence":"additional","affiliation":[{"name":"Independent Researcher, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-9256-1213","authenticated-orcid":false,"given":"Ajay Ashok","family":"Shewale","sequence":"additional","affiliation":[{"name":"Department of software engineering, Rochester Institute of Technology, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-4903-8152","authenticated-orcid":false,"given":"Monish Kumar Manikya","family":"Veerabhadran","sequence":"additional","affiliation":[{"name":"Department of Software Engineering, Rochester Institute of Technology, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8316-4929","authenticated-orcid":false,"given":"Michael","family":"Clifford","sequence":"additional","affiliation":[{"name":"Toyota InfoTech Labs, Toyota Motor North America, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3630-6269","authenticated-orcid":false,"given":"Sara","family":"Rampazzi","sequence":"additional","affiliation":[{"name":"Department of Computer and Information Science and Engineering, University of Florida, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2002-3213","authenticated-orcid":false,"given":"Nidhi","family":"Rastogi","sequence":"additional","affiliation":[{"name":"Department of Software Engineering, Rochester Institute of Technology, USA"}]}],"member":"320","published-online":{"date-parts":[[2023,8,29]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.1145\/2976749.2978318"},{"key":"e_1_3_2_1_2_1","volume-title":"Sanity checks for saliency maps. NeurIPS","author":"Adebayo Julius","year":"2018","unstructured":"Julius Adebayo, Justin Gilmer, Michael Muelly, Ian Goodfellow, Moritz Hardt, and Been Kim. 2018. Sanity checks for saliency maps. NeurIPS (2018)."},{"volume-title":"Explainable Intelligence for Comprehensive Interpretation of Cybersecurity Data in Incident Management. Ph.\u00a0D. Dissertation","author":"AfzaliSeresht Neda","key":"e_1_3_2_1_3_1","unstructured":"Neda AfzaliSeresht. 2022. 
Explainable Intelligence for Comprehensive Interpretation of Cybersecurity Data in Incident Management. Ph.\u00a0D. Dissertation. Victoria University."},{"key":"e_1_3_2_1_4_1","volume-title":"Rethinking Stability for Attribution-based Explanations. In ICLR 2022 Workshop Privacy, Accountability, Interpretability, Robustness, Reasoning on Structured Data.","author":"Agarwal Chirag","year":"2022","unstructured":"Chirag Agarwal, Nari Johnson, Martin Pawelczyk, Satyapriya Krishna, Eshika Saxena, Marinka Zitnik, and Himabindu Lakkaraju. 2022. Rethinking Stability for Attribution-based Explanations. In ICLR 2022 Workshop Privacy, Accountability, Interpretability, Robustness, Reasoning on Structured Data."},{"key":"e_1_3_2_1_5_1","volume-title":"Proceedings of the 31st USENIX Security","author":"Alahmadi A","year":"2022","unstructured":"Bushra\u00a0A Alahmadi, Louise Axon, and Ivan Martinovic. 2022. 99% False Positives: A Qualitative Study of SOC Analysts\u2019 Perspectives on Security Alarms. In Proceedings of the 31st USENIX Security, Boston, MA, USA. 10\u201312."},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-demo.30"},{"key":"e_1_3_2_1_7_1","volume-title":"Towards robust interpretability with self-explaining neural networks. NeurIPS","author":"Alvarez\u00a0Melis David","year":"2018","unstructured":"David Alvarez\u00a0Melis and Tommi Jaakkola. 2018. Towards robust interpretability with self-explaining neural networks. NeurIPS (2018)."},{"key":"e_1_3_2_1_8_1","volume-title":"On the robustness of interpretability methods. arXiv preprint arXiv:1806.08049","author":"Alvarez-Melis David","year":"2018","unstructured":"David Alvarez-Melis and Tommi\u00a0S Jaakkola. 2018. On the robustness of interpretability methods. arXiv preprint arXiv:1806.08049 (2018)."},{"key":"e_1_3_2_1_9_1","volume-title":"One explanation does not fit all: A toolkit and taxonomy of ai explainability techniques. arXiv preprint arXiv:1909.03012","author":"Arya Vijay","year":"2019","unstructured":"Vijay Arya, Rachel\u00a0KE Bellamy, Pin-Yu Chen, Amit Dhurandhar, Michael Hind, Samuel\u00a0C Hoffman, Stephanie Houde, Q\u00a0Vera Liao, Ronny Luss, Aleksandra Mojsilovi\u0107, 2019. One explanation does not fit all: A toolkit and taxonomy of ai explainability techniques. arXiv preprint arXiv:1909.03012 (2019)."},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0130140"},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.3390\/info10040122"},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1145\/3514094.3534164"},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2020\/417"},{"key":"e_1_3_2_1_14_1","volume-title":"Benchmarking and survey of explanation methods for black box models. arXiv preprint arXiv:2102.13076","author":"Bodria Francesco","year":"2021","unstructured":"Francesco Bodria, Fosca Giannotti, Riccardo Guidotti, Francesca Naretto, Dino Pedreschi, and Salvatore Rinzivillo. 2021. Benchmarking and survey of explanation methods for black box models. arXiv preprint arXiv:2102.13076 (2021)."},{"key":"e_1_3_2_1_15_1","unstructured":"Joy Buolamwini. 2018. When the robot doesn\u2019t see Dark skin. 
https:\/\/www.nytimes.com\/2018\/06\/21\/opinion\/facial-analysis-technology-bias.html"},{"key":"e_1_3_2_1_16_1","volume-title":"Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies","volume":"1","author":"Burstein Jill","year":"2019","unstructured":"Jill Burstein, Christy Doran, and Thamar Solorio. 2019. Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers). In Proceedings of the 2019 NAACL: Human Language Technologies, Vol 1)."},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i6.20561"},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"crossref","unstructured":"Nicholas Carlini and David Wagner. 2017. Towards evaluating the robustness of neural networks. In 2017 ieee symposium on security and privacy (sp). Ieee 39\u201357.","DOI":"10.1109\/SP.2017.49"},{"key":"e_1_3_2_1_19_1","volume-title":"International Conference on Machine Learning. PMLR, 1383\u20131391","author":"Chalasani Prasad","year":"2020","unstructured":"Prasad Chalasani, Jiefeng Chen, Amrita\u00a0Roy Chowdhury, Xi Wu, and Somesh Jha. 2020. Concise explanations of neural networks using adversarial training. In International Conference on Machine Learning. PMLR, 1383\u20131391."},{"key":"e_1_3_2_1_20_1","volume-title":"Common and best practices for security operations centers: Results of the 2019 SOC survey. SANS","author":"Crowley Chris","year":"2019","unstructured":"Chris Crowley and John Pescatore. 2019. Common and best practices for security operations centers: Results of the 2019 SOC survey. SANS, Bethesda, MD, USA, Tech. Rep (2019)."},{"key":"e_1_3_2_1_21_1","volume-title":"Automatic feature learning for vulnerability prediction. arXiv preprint arXiv:1708.02368","author":"Dam Hoa\u00a0Khanh","year":"2017","unstructured":"Hoa\u00a0Khanh Dam, Truyen Tran, Trang Pham, Shien\u00a0Wee Ng, John Grundy, and Aditya Ghose. 2017. Automatic feature learning for vulnerability prediction. arXiv preprint arXiv:1708.02368 (2017)."},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2016.42"},{"key":"e_1_3_2_1_23_1","volume-title":"Explanations can be manipulated and geometry is to blame. NeurIPS","author":"Dombrowski Ann-Kathrin","year":"2019","unstructured":"Ann-Kathrin Dombrowski, Maximillian Alber, Christopher Anders, Marcel Ackermann, Klaus-Robert M\u00fcller, and Pan Kessel. 2019. Explanations can be manipulated and geometry is to blame. NeurIPS (2019)."},{"key":"e_1_3_2_1_24_1","volume-title":"Towards a rigorous science of interpretable machine learning. arXiv preprint arXiv:1702.08608","author":"Doshi-Velez Finale","year":"2017","unstructured":"Finale Doshi-Velez and Been Kim. 2017. Towards a rigorous science of interpretable machine learning. arXiv preprint arXiv:1702.08608 (2017)."},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"crossref","unstructured":"Min Du Feifei Li Guineng Zheng and Vivek Srikumar. 2017. DeepLog. https:\/\/github.com\/wuyifan18\/DeepLog","DOI":"10.1145\/3133956.3134015"},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","DOI":"10.1145\/3133956.3134015"},{"key":"e_1_3_2_1_27_1","volume-title":"Conference On Learning Theory. PMLR, 1693\u20131702","author":"Dwork Cynthia","year":"2018","unstructured":"Cynthia Dwork and Vitaly Feldman. 2018. Privacy-preserving prediction. In Conference On Learning Theory. 
PMLR, 1693\u20131702."},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-60117-1_33"},{"key":"e_1_3_2_1_29_1","volume-title":"Improving performance of deep learning models with axiomatic attribution priors and expected gradients. Nature machine intelligence 3, 7","author":"Erion Gabriel","year":"2021","unstructured":"Gabriel Erion, Joseph\u00a0D Janizek, Pascal Sturmfels, Scott\u00a0M Lundberg, and Su-In Lee. 2021. Improving performance of deep learning models with axiomatic attribution priors and expected gradients. Nature machine intelligence 3, 7 (2021), 620\u2013631."},{"key":"e_1_3_2_1_30_1","volume-title":"On the connection between adversarial robustness and saliency map interpretability. arXiv preprint arXiv:1905.04172","author":"Etmann Christian","year":"2019","unstructured":"Christian Etmann, Sebastian Lunz, Peter Maass, and Carola-Bibiane Sch\u00f6nlieb. 2019. On the connection between adversarial robustness and saliency map interpretability. arXiv preprint arXiv:1905.04172 (2019)."},{"key":"e_1_3_2_1_31_1","volume-title":"Detecting adversarial samples from artifacts. arXiv preprint arXiv:1703.00410","author":"Feinman Reuben","year":"2017","unstructured":"Reuben Feinman, Ryan\u00a0R Curtin, Saurabh Shintre, and Andrew\u00a0B Gardner. 2017. Detecting adversarial samples from artifacts. arXiv preprint arXiv:1703.00410 (2017)."},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.1145\/3548606.3559392"},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33013681"},{"key":"e_1_3_2_1_34_1","volume-title":"Explaining and harnessing adversarial examples. arXiv preprint arXiv:1412.6572","author":"Goodfellow J","year":"2014","unstructured":"Ian\u00a0J Goodfellow, Jonathon Shlens, and Christian Szegedy. 2014. Explaining and harnessing adversarial examples. arXiv preprint arXiv:1412.6572 (2014)."},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1145\/3243734.3243792"},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"publisher","DOI":"10.1145\/3460120.3484589"},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5827"},{"key":"e_1_3_2_1_38_1","volume-title":"Explainable Artificial Intelligence in Cybersecurity: A Brief Review. In 2021 4th International Conference on Security and Privacy (ISEA-ISAP). IEEE, 1\u201312","author":"Hariharan Swetha","year":"2021","unstructured":"Swetha Hariharan, Anusha Velicheti, AS Anagha, Ciza Thomas, and N Balakrishnan. 2021. Explainable Artificial Intelligence in Cybersecurity: A Brief Review. In 2021 4th International Conference on Security and Privacy (ISEA-ISAP). IEEE, 1\u201312."},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"e_1_3_2_1_40_1","first-page":"1","article-title":"Quantus: An Explainable AI Toolkit for Responsible Evaluation of Neural Network Explanations and Beyond","volume":"24","author":"Hedstr\u00f6m Anna","year":"2023","unstructured":"Anna Hedstr\u00f6m, Leander Weber, Daniel Krakowczyk, Dilyara Bareeva, Franz Motzkus, Wojciech Samek, Sebastian Lapuschkin, and Marina Marina\u00a0M.-C. H\u00f6hne. 2023. Quantus: An Explainable AI Toolkit for Responsible Evaluation of Neural Network Explanations and Beyond. Journal of Machine Learning Research 24, 34 (2023), 1\u201311. http:\/\/jmlr.org\/papers\/v24\/22-0142.html","journal-title":"Journal of Machine Learning Research"},{"key":"e_1_3_2_1_41_1","volume-title":"Fooling neural network interpretations via adversarial model manipulation. 
NeurIPS","author":"Heo Juyeon","year":"2019","unstructured":"Juyeon Heo, Sunghwan Joo, and Taesup Moon. 2019. Fooling neural network interpretations via adversarial model manipulation. NeurIPS (2019)."},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i6.20651"},{"key":"e_1_3_2_1_43_1","volume-title":"Attribution-driven causal analysis for detection of adversarial examples. arXiv preprint arXiv:1903.05821","author":"Jha Susmit","year":"2019","unstructured":"Susmit Jha, Sunny Raj, Steven\u00a0Lawrence Fernandes, Sumit\u00a0Kumar Jha, Somesh Jha, Gunjan Verma, Brian Jalaian, and Ananthram Swami. 2019. Attribution-driven causal analysis for detection of adversarial examples. arXiv preprint arXiv:1903.05821 (2019)."},{"key":"e_1_3_2_1_44_1","doi-asserted-by":"publisher","DOI":"10.23919\/ICIF.2018.8455737"},{"key":"e_1_3_2_1_45_1","volume-title":"Learning how to explain neural networks: Patternnet and patternattribution. arXiv preprint arXiv:1705.05598","author":"Kindermans Pieter-Jan","year":"2017","unstructured":"Pieter-Jan Kindermans, Kristof\u00a0T Sch\u00fctt, Maximilian Alber, Klaus-Robert M\u00fcller, Dumitru Erhan, Been Kim, and Sven D\u00e4hne. 2017. Learning how to explain neural networks: Patternnet and patternattribution. arXiv preprint arXiv:1705.05598 (2017)."},{"key":"e_1_3_2_1_46_1","volume-title":"Captum: A unified and generic model interpretability library for PyTorch. arxiv:2009.07896\u00a0[cs.LG]","author":"Kokhlikyan Narine","year":"2020","unstructured":"Narine Kokhlikyan, Vivek Miglani, Miguel Martin, Edward Wang, Bilal Alsallakh, Jonathan Reynolds, Alexander Melnikov, Natalia Kliushkina, Carlos Araya, Siqi Yan, and Orion Reblitz-Richardson. 2020. Captum: A unified and generic model interpretability library for PyTorch. arxiv:2009.07896\u00a0[cs.LG]"},{"key":"e_1_3_2_1_47_1","doi-asserted-by":"publisher","DOI":"10.1145\/2858036.2858529"},{"key":"e_1_3_2_1_48_1","volume-title":"GI SIG SIDAR workshop, DIMVA","author":"Kruegel Christopher","year":"2004","unstructured":"Christopher Kruegel and William Robertson. 2004. Alert verification determining the success of intrusion attempts. In Detection of intrusions and malware & vulnerability assessment, GI SIG SIDAR workshop, DIMVA 2004. Gesellschaft f\u00fcr Informatik eV."},{"key":"e_1_3_2_1_49_1","volume-title":"Adversarial machine learning at scale. arXiv preprint arXiv:1611.01236","author":"Kurakin Alexey","year":"2016","unstructured":"Alexey Kurakin, Ian Goodfellow, and Samy Bengio. 2016. Adversarial machine learning at scale. arXiv preprint arXiv:1611.01236 (2016)."},{"key":"e_1_3_2_1_50_1","volume-title":"2014 IEEE symposium on security and privacy. IEEE, 197\u2013211","author":"Laskov Pavel","year":"2014","unstructured":"Pavel Laskov 2014. Practical evasion of a learning-based classifier: A case study. In 2014 IEEE symposium on security and privacy. IEEE, 197\u2013211."},{"key":"e_1_3_2_1_51_1","doi-asserted-by":"publisher","DOI":"10.1109\/5.726791"},{"key":"e_1_3_2_1_52_1","doi-asserted-by":"publisher","DOI":"10.3390\/e23010018"},{"key":"e_1_3_2_1_53_1","volume-title":"A unified approach to interpreting model predictions. NeurIPS","author":"Lundberg M","year":"2017","unstructured":"Scott\u00a0M Lundberg and Su-In Lee. 2017. A unified approach to interpreting model predictions. NeurIPS (2017)."},{"key":"e_1_3_2_1_54_1","volume-title":"Characterizing adversarial subspaces using local intrinsic dimensionality. 
arXiv preprint arXiv:1801.02613","author":"Ma Xingjun","year":"2018","unstructured":"Xingjun Ma, Bo Li, Yisen Wang, Sarah\u00a0M Erfani, Sudanthi Wijewickrema, Grant Schoenebeck, Dawn Song, Michael\u00a0E Houle, and James Bailey. 2018. Characterizing adversarial subspaces using local intrinsic dimensionality. arXiv preprint arXiv:1801.02613 (2018)."},{"key":"e_1_3_2_1_55_1","volume-title":"Towards deep learning models resistant to adversarial attacks. arXiv preprint arXiv:1706.06083","author":"Madry Aleksander","year":"2017","unstructured":"Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. 2017. Towards deep learning models resistant to adversarial attacks. arXiv preprint arXiv:1706.06083 (2017)."},{"key":"e_1_3_2_1_56_1","unstructured":"Brendan McMahan Eider Moore Daniel Ramage Seth Hampson and Blaise\u00a0Aguera y Arcas. 2017. Communication-efficient learning of deep networks from decentralized data. In Artificial intelligence and statistics. PMLR 1273\u20131282."},{"key":"e_1_3_2_1_57_1","volume-title":"Explainable AI: Beware of inmates running the asylum or: How I learnt to stop worrying and love the social and behavioural sciences. arXiv preprint arXiv:1712.00547","author":"Miller Tim","year":"2017","unstructured":"Tim Miller, Piers Howe, and Liz Sonenberg. 2017. Explainable AI: Beware of inmates running the asylum or: How I learnt to stop worrying and love the social and behavioural sciences. arXiv preprint arXiv:1712.00547 (2017)."},{"key":"e_1_3_2_1_58_1","doi-asserted-by":"publisher","DOI":"10.1145\/3287560.3287562"},{"key":"e_1_3_2_1_59_1","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.1900654116"},{"key":"e_1_3_2_1_60_1","volume-title":"SoK: Explainable Machine Learning for Computer Security Applications. arXiv preprint arXiv:2208.10605","author":"Nadeem Azqa","year":"2022","unstructured":"Azqa Nadeem, Dani\u00ebl Vos, Clinton Cao, Luca Pajola, Simon Dieck, Robert Baumgartner, and Sicco Verwer. 2022. SoK: Explainable Machine Learning for Computer Security Applications. arXiv preprint arXiv:2208.10605 (2022)."},{"key":"e_1_3_2_1_61_1","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2022.3142719"},{"key":"e_1_3_2_1_62_1","doi-asserted-by":"publisher","DOI":"10.14722\/usec.2022.23014"},{"key":"e_1_3_2_1_64_1","volume-title":"Technical Report on the CleverHans v2.1.0 Adversarial Examples Library. arXiv preprint arXiv:1610.00768","author":"Papernot Nicolas","year":"2018","unstructured":"Nicolas Papernot, Fartash Faghri, Nicholas Carlini, Ian Goodfellow, Reuben Feinman, Alexey Kurakin, Cihang Xie, Yash Sharma, Tom Brown, Aurko Roy, Alexander Matyasko, Vahid Behzadan, Karen Hambardzumyan, Zhishuai Zhang, Yi-Lin Juang, Zhi Li, Ryan Sheatsley, Abhibhav Garg, Jonathan Uesato, Willi Gierke, Yinpeng Dong, David Berthelot, Paul Hendricks, Jonas Rauber, and Rujun Long. 2018. Technical Report on the CleverHans v2.1.0 Adversarial Examples Library. arXiv preprint arXiv:1610.00768 (2018)."},{"key":"e_1_3_2_1_65_1","doi-asserted-by":"publisher","DOI":"10.1145\/3052973.3053009"},{"key":"e_1_3_2_1_66_1","volume-title":"Proceedings of the Network and Distributed Systems Security (NDSS) Symposium.","author":"Parra GDLT","year":"2022","unstructured":"GDLT Parra, Luis Selvera, Joseph Khoury, Hector Irizarry, Elias Bou-Harb, and Paul Rad. 2022. Interpretable federated transformer log learning for cloud threat forensics. 
In Proceedings of the Network and Distributed Systems Security (NDSS) Symposium."},{"key":"e_1_3_2_1_67_1","doi-asserted-by":"publisher","DOI":"10.1145\/3531146.3533235"},{"key":"e_1_3_2_1_68_1","volume-title":"Explanation and trust: what to tell the user in security and AI?Ethics and information technology 13, 1","author":"Pieters Wolter","year":"2011","unstructured":"Wolter Pieters. 2011. Explanation and trust: what to tell the user in security and AI?Ethics and information technology 13, 1 (2011), 53\u201364."},{"key":"e_1_3_2_1_69_1","volume-title":"On the amplification of security and privacy risks by post-hoc explanations in machine learning models. arXiv preprint arXiv:2206.14004","author":"Quan Pengrui","year":"2022","unstructured":"Pengrui Quan, Supriyo Chakraborty, Jeya\u00a0Vikranth Jeyakumar, and Mani Srivastava. 2022. On the amplification of security and privacy risks by post-hoc explanations in machine learning models. arXiv preprint arXiv:2206.14004 (2022)."},{"volume-title":"22nd ACM SIGKDD.","author":"Ribeiro Marco\u00a0Tulio","key":"e_1_3_2_1_70_1","unstructured":"Marco\u00a0Tulio Ribeiro, Sameer Singh, and Carlos Guestrin. 2016. \" Why should i trust you?\" Explaining the predictions of any classifier. In 22nd ACM SIGKDD."},{"key":"e_1_3_2_1_71_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11491"},{"volume-title":"Human and machine learning","author":"Robnik-\u0160ikonja Marko","key":"e_1_3_2_1_72_1","unstructured":"Marko Robnik-\u0160ikonja and Marko Bohanec. 2018. Perturbation-based explanations of prediction models. In Human and machine learning. Springer, 159\u2013175."},{"key":"e_1_3_2_1_73_1","volume-title":"Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead. Nature machine intelligence 1, 5","author":"Rudin Cynthia","year":"2019","unstructured":"Cynthia Rudin. 2019. Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead. Nature machine intelligence 1, 5 (2019), 206\u2013215."},{"key":"e_1_3_2_1_74_1","doi-asserted-by":"publisher","DOI":"10.1214\/21-SS133"},{"key":"e_1_3_2_1_75_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.74"},{"key":"e_1_3_2_1_76_1","doi-asserted-by":"publisher","DOI":"10.1145\/3461702.3462533"},{"key":"e_1_3_2_1_77_1","volume-title":"Membership Inference Attacks Against Machine Learning Models. 2017 IEEE Symposium on Security and Privacy (SP)","author":"Shokri R.","year":"2017","unstructured":"R. Shokri, Marco Stronati, Congzheng Song, and Vitaly Shmatikov. 2017. Membership Inference Attacks Against Machine Learning Models. 2017 IEEE Symposium on Security and Privacy (SP) (2017), 3\u201318."},{"key":"e_1_3_2_1_78_1","volume-title":"International conference on machine learning. PMLR, 3145\u20133153","author":"Shrikumar Avanti","year":"2017","unstructured":"Avanti Shrikumar, Peyton Greenside, and Anshul Kundaje. 2017. Learning important features through propagating activation differences. In International conference on machine learning. PMLR, 3145\u20133153."},{"key":"e_1_3_2_1_79_1","volume-title":"Not just a black box: Learning important features through propagating activation differences. arXiv preprint arXiv:1605.01713","author":"Shrikumar Avanti","year":"2016","unstructured":"Avanti Shrikumar, Peyton Greenside, Anna Shcherbina, and Anshul Kundaje. 2016. Not just a black box: Learning important features through propagating activation differences. 
arXiv preprint arXiv:1605.01713 (2016)."},{"key":"e_1_3_2_1_80_1","volume-title":"Deep inside convolutional networks: Visualising image classification models and saliency maps. arXiv preprint arXiv:1312.6034","author":"Simonyan Karen","year":"2013","unstructured":"Karen Simonyan, Andrea Vedaldi, and Andrew Zisserman. 2013. Deep inside convolutional networks: Visualising image classification models and saliency maps. arXiv preprint arXiv:1312.6034 (2013)."},{"key":"e_1_3_2_1_81_1","doi-asserted-by":"publisher","DOI":"10.1145\/3375627.3375830"},{"key":"e_1_3_2_1_82_1","volume-title":"Smoothgrad: removing noise by adding noise. arXiv preprint arXiv:1706.03825","author":"Smilkov Daniel","year":"2017","unstructured":"Daniel Smilkov, Nikhil Thorat, Been Kim, Fernanda Vi\u00e9gas, and Martin Wattenberg. 2017. Smoothgrad: removing noise by adding noise. arXiv preprint arXiv:1706.03825 (2017)."},{"key":"e_1_3_2_1_83_1","doi-asserted-by":"publisher","DOI":"10.1145\/3351095.3372870"},{"key":"e_1_3_2_1_84_1","doi-asserted-by":"publisher","DOI":"10.23915\/distill.00022"},{"key":"e_1_3_2_1_85_1","volume-title":"International conference on machine learning. PMLR, 3319\u20133328","author":"Sundararajan Mukund","year":"2017","unstructured":"Mukund Sundararajan, Ankur Taly, and Qiqi Yan. 2017. Axiomatic attribution for deep networks. In International conference on machine learning. PMLR, 3319\u20133328."},{"key":"e_1_3_2_1_86_1","volume-title":"Intriguing properties of neural networks. arXiv preprint arXiv:1312.6199","author":"Szegedy Christian","year":"2013","unstructured":"Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. 2013. Intriguing properties of neural networks. arXiv preprint arXiv:1312.6199 (2013)."},{"key":"e_1_3_2_1_87_1","volume-title":"DEEPCASE: Semi-Supervised Contextual Analysis of Security Events","author":"van Ede Thijs","year":"2022","unstructured":"Thijs van Ede, Hojjat Aghakhani, Noah Spahn, Riccardo Bortolameotti, Marco Cova, Andrea Continella, Maarten van Steen, Andreas Peter, Christopher Kruegel, and Giovanni Vigna. 2022. DEEPCASE: Semi-Supervised Contextual Analysis of Security Events. IEEE Security and Privacy (2022)."},{"key":"e_1_3_2_1_88_1","volume-title":"Attention is all you need. NeurIPS","author":"Vaswani Ashish","year":"2017","unstructured":"Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan\u00a0N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. NeurIPS (2017)."},{"key":"e_1_3_2_1_89_1","doi-asserted-by":"publisher","DOI":"10.1109\/EuroSPW51379.2020.00045"},{"key":"e_1_3_2_1_90_1","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3403044"},{"volume-title":"Evaluating explanation methods for deep learning in security. In 2020 IEEE european symposium on security and privacy (EuroS&P)","author":"Warnecke Alexander","key":"e_1_3_2_1_91_1","unstructured":"Alexander Warnecke, Daniel Arp, Christian Wressnegger, and Konrad Rieck. 2020. Evaluating explanation methods for deep learning in security. In 2020 IEEE european symposium on security and privacy (EuroS&P). 
IEEE, 158\u2013174."},{"key":"e_1_3_2_1_92_1","doi-asserted-by":"publisher","DOI":"10.1145\/1629575.1629587"},{"key":"e_1_3_2_1_93_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.6140"},{"key":"e_1_3_2_1_94_1","first-page":"20554","article-title":"On completeness-aware concept-based explanations in deep neural networks","volume":"33","author":"Yeh Chih-Kuan","year":"2020","unstructured":"Chih-Kuan Yeh, Been Kim, Sercan Arik, Chun-Liang Li, Tomas Pfister, and Pradeep Ravikumar. 2020. On completeness-aware concept-based explanations in deep neural networks. Advances in Neural Information Processing Systems 33 (2020), 20554\u201320565.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_95_1","volume-title":"Droidfusion: A novel multilevel classifier fusion approach for android malware detection","author":"Yerima Y","year":"2018","unstructured":"Suleiman\u00a0Y Yerima and Sakir Sezer. 2018. Droidfusion: A novel multilevel classifier fusion approach for android malware detection. IEEE transactions on cybernetics 49, 2 (2018), 453\u2013466."},{"key":"e_1_3_2_1_96_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10590-1_53"},{"key":"e_1_3_2_1_97_1","unstructured":"Xinyang Zhang Ningfei Wang Hua Shen Shouling Ji Xiapu Luo and Ting Wang. 2020. Interpretable deep learning under fire. In { USENIX} \u201920)."},{"key":"e_1_3_2_1_98_1","volume-title":"AI for Social Good Workshop.","author":"Zhou Yan","year":"2020","unstructured":"Yan Zhou and Murat Kantarcioglu. 2020. On transparency of machine learning models: A position paper. In AI for Social Good Workshop."}],"event":{"name":"ARES 2023: The 18th International Conference on Availability, Reliability and Security","acronym":"ARES 2023","location":"Benevento Italy"},"container-title":["Proceedings of the 18th International Conference on Availability, Reliability and Security"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3600160.3600193","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3600160.3600193","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T16:36:13Z","timestamp":1750178173000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3600160.3600193"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,8,29]]},"references-count":97,"alternative-id":["10.1145\/3600160.3600193","10.1145\/3600160"],"URL":"https:\/\/doi.org\/10.1145\/3600160.3600193","relation":{},"subject":[],"published":{"date-parts":[[2023,8,29]]},"assertion":[{"value":"2023-08-29","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
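
The object above is a standard Crossref REST API "works" record. As a minimal sketch of how such a record can be fetched and parsed, the following Python snippet queries the public Crossref endpoint (https://api.crossref.org/works/{DOI}) for the DOI that appears in the record and pulls out a few of the fields shown above. It assumes network access and the third-party `requests` package; field availability varies across Crossref records, so every lookup is guarded with a default.

import requests

# DOI taken directly from the record above.
DOI = "10.1145/3600160.3600193"

resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
# The payload mirrors the structure above: {"status": "ok", "message": {...}}.
work = resp.json()["message"]

# "title" is a list of strings in the Crossref schema.
title = work.get("title", ["<untitled>"])[0]

# Each author entry carries "given"/"family" names plus optional
# ORCID and affiliation, as in the "author" array above.
authors = ", ".join(
    f'{a.get("given", "")} {a.get("family", "")}'.strip()
    for a in work.get("author", [])
)

# References are either DOI-asserted or free-text ("unstructured");
# "references-count" summarizes the "reference" array.
n_refs = work.get("references-count", len(work.get("reference", [])))

print(f"{authors}. {title}. DOI: {work['DOI']} ({n_refs} references)")

Run against this DOI, the script would print the SoK paper's title, its seven authors, and the 97-entry reference count recorded in the metadata. Note that dates such as "issued" and "published-print" use Crossref's "date-parts" convention (nested arrays like [[2023, 8, 29]]) rather than ISO strings, so they need their own unpacking if required.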