{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,8]],"date-time":"2026-04-08T17:02:22Z","timestamp":1775667742116,"version":"3.50.1"},"reference-count":68,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2024,3,15]],"date-time":"2024-03-15T00:00:00Z","timestamp":1710460800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,3,15]],"date-time":"2024-03-15T00:00:00Z","timestamp":1710460800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Comput Soc Sc"],"published-print":{"date-parts":[[2024,4]]},"DOI":"10.1007\/s42001-024-00248-9","type":"journal-article","created":{"date-parts":[[2024,3,15]],"date-time":"2024-03-15T10:03:57Z","timestamp":1710497037000},"page":"587-623","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":30,"title":["A survey of explainable AI techniques for detection of fake news and hate speech on social media platforms"],"prefix":"10.1007","volume":"7","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-8332-5630","authenticated-orcid":false,"given":"Vaishali U.","family":"Gongane","sequence":"first","affiliation":[]},{"given":"Mousami V.","family":"Munot","sequence":"additional","affiliation":[]},{"given":"Alwin D.","family":"Anuse","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,3,15]]},"reference":[{"issue":"8","key":"248_CR1","doi-asserted-by":"publisher","first-page":"5031","DOI":"10.1109\/TII.2022.3146552","volume":"18","author":"I Ahmed","year":"2022","unstructured":"Ahmed, I., Jeon, G., & Piccialli, F. (2022). From artificial intelligence to explainable artificial intelligence in industry 4.0: a survey on what, how, and where. IEEE Transactions on Industrial Informatics, 18(8), 5031\u20135042. https:\/\/doi.org\/10.1109\/TII.2022.3146552","journal-title":"IEEE Transactions on Industrial Informatics"},{"key":"248_CR2","volume-title":"Interpretable Machine Learning\u2014A Guide for Making Black Box Models Explainable","author":"C Molnar","year":"2022","unstructured":"Molnar, C. (2022). Interpretable Machine Learning\u2014A Guide for Making Black Box Models Explainable. Lulu."},{"key":"248_CR3","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-02165-7","volume-title":"Neural Network Methods for Natural Language Processing Synthesis Lectures on Human Language Technologies","author":"Y Goldberg","year":"2017","unstructured":"Goldberg, Y. (2017). Neural Network Methods for Natural Language Processing Synthesis Lectures on Human Language Technologies. Springer."},{"key":"248_CR4","doi-asserted-by":"publisher","first-page":"5","DOI":"10.1007\/978-3-030-28954-6_1","volume":"11700","author":"W Samek","year":"2019","unstructured":"Samek, W., & M\u00fcller, K. R. (2019). Towards explainable artificial intelligence. Explainable AI, LNAI, 11700, 5\u201322. https:\/\/doi.org\/10.1007\/978-3-030-28954-6_1","journal-title":"Explainable AI, LNAI"},{"key":"248_CR5","unstructured":"Gohel, P., Singh, P., & Mohanty, M. (2021). Explainable AI: Current status and future directions. arXiv, https:\/\/arxiv.org\/abs\/2107.07045."},{"key":"248_CR6","unstructured":"Mohseni, S., Zarei, N., & Ragan, E. (2020). A multidisciplinary survey and framework for design and evaluation of explainable AI systems, https:\/\/arXiv.org\/1811.11839v5 [cs.HC]."},{"issue":"5","key":"248_CR7","doi-asserted-by":"publisher","first-page":"42","DOI":"10.1145\/3236009","volume":"51","author":"R Guidotti","year":"2018","unstructured":"Guidotti, R., Monreale, A., Ruggieri, S., Turini, F., Giannotti, F., & Pedreschi, D. (2018). A survey of methods for explaining black box models. ACM Computing Surveys., 51(5), 42. https:\/\/doi.org\/10.1145\/3236009","journal-title":"ACM Computing Surveys."},{"key":"248_CR8","doi-asserted-by":"publisher","first-page":"206","DOI":"10.1038\/s42256-019-0048-x","volume":"1","author":"C Rudin","year":"2019","unstructured":"Rudin, C. (2019). Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead. Nature Machine Intelligence, 1, 206\u2013215. https:\/\/doi.org\/10.1038\/s42256-019-0048-x","journal-title":"Nature Machine Intelligence"},{"key":"248_CR9","doi-asserted-by":"publisher","first-page":"129","DOI":"10.1007\/s13278-022-00951-3","volume":"12","author":"VU Gongane","year":"2022","unstructured":"Gongane, V. U., Munot, M. V., & Anuse, A. D. (2022). Detection and moderation of detrimental content on social media platforms: Current status and future directions. Social Network Analysis and Mining., 12, 129. https:\/\/doi.org\/10.1007\/s13278-022-00951-3","journal-title":"Social Network Analysis and Mining."},{"issue":"2","key":"248_CR10","doi-asserted-by":"publisher","first-page":"44","DOI":"10.1609\/aimag.v40i2.2850","volume":"40","author":"D Gunning","year":"2019","unstructured":"Gunning, D., & Aha, D. (2019). Darpa\u2019s explainable artificial intelligence (xai) program. AI Magazine, 40(2), 44\u201358.","journal-title":"AI Magazine"},{"key":"248_CR11","doi-asserted-by":"publisher","first-page":"11","DOI":"10.1007\/978-3-658-06984-1_2","volume-title":"Social Media","author":"C Wyrwoll","year":"2014","unstructured":"Wyrwoll, C. (2014). User-generated content. Social Media (pp. 11\u201345). Springer. https:\/\/doi.org\/10.1007\/978-3-658-06984-1_2"},{"issue":"2","key":"248_CR12","doi-asserted-by":"publisher","first-page":"204","DOI":"10.1016\/j.cjca.2021.09.004","volume":"38","author":"J Petch","year":"2022","unstructured":"Petch, J., Di, S., & Nelson, W. (2022). Opening the Black Box: the promise and limitations of explainable machine learning in cardiology. Canadian Journal of Cardiology, 38(2), 204\u2013213. https:\/\/doi.org\/10.1016\/j.cjca.2021.09.004","journal-title":"Canadian Journal of Cardiology"},{"key":"248_CR13","doi-asserted-by":"publisher","DOI":"10.1587\/transinf.2021EDR0003","author":"K Mishima","year":"2022","unstructured":"Mishima, K., & Yamana, H. (2022). A survey on explainable fake news detection. IEICE Transactions on Information and Systems. https:\/\/doi.org\/10.1587\/transinf.2021EDR0003","journal-title":"IEICE Transactions on Information and Systems"},{"key":"248_CR14","doi-asserted-by":"publisher","DOI":"10.1007\/s42001-023-00200-3","author":"J Langguth","year":"2023","unstructured":"Langguth, J., Schroeder, D. T., Filkukov\u00e1, P., Brenner, S., Phillips, J., & Pogorelov, K. (2023). COCO: an annotated Twitter dataset of COVID-19 conspiracy theories. Journal of Computational Social Science. https:\/\/doi.org\/10.1007\/s42001-023-00200-3","journal-title":"Journal of Computational Social Science"},{"issue":"4","key":"248_CR15","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3232676","volume":"51","author":"P Fortuna","year":"2018","unstructured":"Fortuna, P., & Nunes, S. (2018). A survey on automatic detection of hate speech in text. ACM Computing Surveys., 51(4), 1\u201330. https:\/\/doi.org\/10.1145\/3232676","journal-title":"ACM Computing Surveys."},{"key":"248_CR16","doi-asserted-by":"publisher","unstructured":"Nobata, C., Tetreault, JR., Thomas, A., Mehdad, Y., & Chang Y. (2016). Abusive Language Detection in Online User Content. In: Proceedings of the 25th International Conference on World Wide Web, pp. 145\u2013153. https:\/\/doi.org\/10.1145\/2872427.2883062.","DOI":"10.1145\/2872427.2883062"},{"issue":"1","key":"248_CR17","doi-asserted-by":"publisher","first-page":"22","DOI":"10.1145\/3137597.3137600","volume":"19","author":"K Shu","year":"2017","unstructured":"Shu, K., Sliva, A., Wang, S., Tang, J., & Liu, H. (2017). Fake news detection on social media: a data mining perspective. ACM SIGKDD Explorations Newsletter, 19(1), 22\u201336. https:\/\/doi.org\/10.1145\/3137597.3137600","journal-title":"ACM SIGKDD Explorations Newsletter"},{"key":"248_CR18","doi-asserted-by":"crossref","unstructured":"Davidson, T., Warmsley, D., Macy, M., & Weber, I. (2017). Automated hate speech detection and the problem of offensive language. In: Proceedings of the 11th International AAAI Social Media, ICWSM \u201917: 512\u2013515.","DOI":"10.1609\/icwsm.v11i1.14955"},{"key":"248_CR19","doi-asserted-by":"publisher","first-page":"110273","DOI":"10.1016\/j.knosys.2023.110273","volume":"263","author":"W Saeed","year":"2023","unstructured":"Saeed, W., & Omlin, C. (2023). Explainable AI (XAI): A systematic meta-survey of current challenges and future opportunities. Knowledge-Based Systems, 263, 110273. https:\/\/doi.org\/10.1016\/j.knosys.2023.110273","journal-title":"Knowledge-Based Systems"},{"issue":"4","key":"248_CR20","doi-asserted-by":"publisher","first-page":"102569","DOI":"10.1016\/j.ipm.2021.102569","volume":"58","author":"J Ayoub","year":"2021","unstructured":"Ayoub, J., Yang, X. J., & Zhou, F. (2021). Combat COVID-19 infodemic using explainable natural language processing models. Information Processing & Management, 58(4), 102569. https:\/\/doi.org\/10.1016\/j.ipm.2021.102569","journal-title":"Information Processing & Management"},{"key":"248_CR21","unstructured":"CDC. (2020). Coronavirus disease 2019 (COVID-19). https:\/\/www.cdc.gov\/coronavirus\/2019-ncov\/index.html, cited on 04 Oct 2024."},{"key":"248_CR22","unstructured":"Duarte, N., Llanso, E., & Loup, A. (2017). Mixed Messages? The Limits of Automated Social Media Content Analysis."},{"issue":"5","key":"248_CR23","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3395046","volume":"53","author":"X Zhou","year":"2020","unstructured":"Zhou, X., & Zafarani, R. (2020). A survey of fake news: fundamental theories, detection methods, and opportunities. ACM Computing Surveys, 53(5), 1\u201330. https:\/\/doi.org\/10.1145\/3395046","journal-title":"ACM Computing Surveys"},{"key":"248_CR24","doi-asserted-by":"publisher","first-page":"18","DOI":"10.3390\/e23010018","volume":"23","author":"P Linardatos","year":"2021","unstructured":"Linardatos, P., Papastefanopoulos, V., & Kotsiantis, S. (2021). Explainable AI: A review of machine learning interpretability methods. Entropy, 23, 18. https:\/\/doi.org\/10.3390\/e23010018","journal-title":"Entropy"},{"key":"248_CR25","unstructured":"Doshi-Velez, F., & Kim, B. (2017). Towards a rigorous science of interpretable machine learning. https:\/\/arXiv.org\/1702.08608."},{"key":"248_CR26","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1016\/j.artint.2018.07.007","volume":"267","author":"T Miller","year":"2019","unstructured":"Miller, T. (2019). Explanation in artificial intelligence: Insights from the social sciences. Artificial Intelligence., 267, 1\u201338.","journal-title":"Artificial Intelligence."},{"key":"248_CR27","doi-asserted-by":"publisher","DOI":"10.1201\/9781003333425","volume-title":"Explainable AI in Healthcare: Unboxing Machine Learning for Biomedicine","author":"KV Sakhare","year":"2023","unstructured":"Sakhare, K. V., Vyas, V., & Munot, M. (2023). Predictive analytics in hospital readmission for diabetes risk patients. Explainable AI in Healthcare: Unboxing Machine Learning for Biomedicine. Chapman and Hall\/CRC. https:\/\/doi.org\/10.1201\/9781003333425"},{"key":"248_CR28","doi-asserted-by":"crossref","unstructured":"Ribeiro, MT., Singh, S., & Guestrin, C. (2016). \u201cWhy Should I Trust You?\u201d Explaining the Predictions of Any Classifier. https:\/\/arXiv.org\/1602.04938v1 16 Feb 2016","DOI":"10.18653\/v1\/N16-3020"},{"issue":"2","key":"248_CR29","doi-asserted-by":"publisher","first-page":"143","DOI":"10.1111\/hir.12320","volume":"38","author":"SB Naeem","year":"2021","unstructured":"Naeem, S. B., Bhatti, R., & Khan, A. (2021). An exploration of how fake news is taking over social media and putting public health at risk. Health Information & Libraries Journal., 38(2), 143\u2013149. https:\/\/doi.org\/10.1111\/hir.12320. Epub 2020 Jul 12. PMID: 32657000; PMCID: PMC7404621.","journal-title":"Health Information & Libraries Journal."},{"key":"248_CR30","unstructured":"Mosca, E. (2020). Explainability of Hate Speech Detection Models. Master. Master Thesis, Technische Universitat Munchen."},{"key":"248_CR31","doi-asserted-by":"crossref","unstructured":"Balkir, E., Kiritchenko, S., Nejadgholi, I., Fraser, K. (2022). Challenges in Applying Explainability Methods to Improve the Fairness of NLP Models. In: Proceedings of the 2nd Workshop on Trustworthy Natural Language Processing (TrustNLP 2022), pp 80\u201392, Seattle, USA Association for Computational Linguistics.","DOI":"10.18653\/v1\/2022.trustnlp-1.8"},{"issue":"28","key":"248_CR32","first-page":"307","volume":"2","author":"LS Shapley","year":"1953","unstructured":"Shapley, L. S. (1953). A value for n-person games. Contribution Theory Games, 2(28), 307\u2013317.","journal-title":"Contribution Theory Games"},{"key":"248_CR33","unstructured":"Lundberg, S.M., & Lee, S.-I. (2017). A unified approach to interpreting model predictions. In: Advances in Neural Information Processing Systems, pp. 4765\u20134774."},{"key":"248_CR34","unstructured":"Mosca, E., Szigeti, E., Tragianni, S., Gallagher, D., Groh, G. (2022). SHAP-Based explanation methods: a review for NLP interpretability. In: Proceedings of the 29th International Conference on Computational Linguistics, pp 4593\u20134603."},{"key":"248_CR35","doi-asserted-by":"crossref","unstructured":"Shu, K., Cui, L., Wang, S., Lee, D., Liu, H. (2019). DEFEND: explainable fake news detection. In: Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery Data Mining, pp. 395\u2013405.","DOI":"10.1145\/3292500.3330935"},{"key":"248_CR36","doi-asserted-by":"crossref","unstructured":"Yang, F., Pentyala, S.K., Mohseni, S., Du, M., Yuan, H., Linder, R., Ragan, E.D., Ji, S., & Hu, X. (2019). XFake: Explainable fake news detector with visualizations. In: Proc. World Wide Web Conf., pp. 3600\u20133604.","DOI":"10.1145\/3308558.3314119"},{"key":"248_CR37","doi-asserted-by":"crossref","unstructured":"Lu, Y.-J., & Li, C.-T. (2020). GCAN: Graph-aware co-attention networks for explainable fake news detection on social media: In: Proc. 58th Ann. Meeting of the Association for Computational Linguistics, pp. 505\u2013514.","DOI":"10.18653\/v1\/2020.acl-main.48"},{"issue":"5","key":"248_CR38","doi-asserted-by":"publisher","first-page":"8783","DOI":"10.1609\/aaai.v34i05.6405","volume":"34","author":"LMS Khoo","year":"2020","unstructured":"Khoo, L. M. S., Chieu, H. L., Qian, Z., & Jiang, J. (2020). Interpretable rumor detection in microblogs by attending to user interactions. Proceedings AAAI Conference Artificial Intelligence, 34(5), 8783\u20138790.","journal-title":"Proceedings AAAI Conference Artificial Intelligence"},{"key":"248_CR39","doi-asserted-by":"publisher","first-page":"93","DOI":"10.3390\/bdcc6030093","volume":"6","author":"X Ge","year":"2022","unstructured":"Ge, X., Hao, S., Li, Y., Wei, B., & Zhang, M. (2022). Hierarchical co-attention selection network for interpretable fake news detection. Big Data and Cognitive Computing., 6, 93. https:\/\/doi.org\/10.3390\/bdcc6030093","journal-title":"Big Data and Cognitive Computing."},{"key":"248_CR40","unstructured":"Smilkov, D. et al. (2017). Smoothgrad: removing noise by adding noise. arXiv preprint https:\/\/arXiv.org\/1706.03825"},{"key":"248_CR41","unstructured":"Sundararajan, M., Taly, A., & Yan, Q. (2017). Axiomatic attribution for deep networks. In: International Conference on Machine Learning, pp 3319\u20133328. PMLR."},{"issue":"2","key":"248_CR42","first-page":"51","volume":"12","author":"A Mumtahina","year":"2022","unstructured":"Mumtahina, A., Shahadat, H. M., Raihan Ul, I., & Karl, A. (2022). Explainable text classification model for COVID-19 fake news detection. Journal of Internet Services and Information Security (JISIS), 12(2), 51\u201369.","journal-title":"Journal of Internet Services and Information Security (JISIS)"},{"key":"248_CR43","volume-title":"Lecture Notes in Informatics (LNI)","author":"D Nandini","year":"2022","unstructured":"Nandini, D., & Schmid, U. (2022). Explaining Hate Speech Classification with Model-Agnostic MethodsW6: Text Mining and Generation. Lecture Notes in Informatics (LNI). Gesellschaft f\u00fcr Informatik."},{"key":"248_CR44","doi-asserted-by":"publisher","DOI":"10.29173\/irie416","author":"C Diogo","year":"2021","unstructured":"Diogo, C., & Zubiaga, A. (2021). Ethical and technical challenges of AI in tackling hate speech. The International Review of Information Ethics. https:\/\/doi.org\/10.29173\/irie416","journal-title":"The International Review of Information Ethics"},{"issue":"7","key":"248_CR45","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0130140","volume":"10","author":"S Bach","year":"2015","unstructured":"Bach, S., Binder, A., Montavon, G., Klauschen, F., M\u00fcller, K. R., & Samek, W. (2015). On pixel-wise explanations for non-linear classifier decisions by layer-wise relevance propagation. PLoS ONE, 10(7), e0130140.","journal-title":"PLoS ONE"},{"issue":"3","key":"248_CR46","doi-asserted-by":"publisher","first-page":"50","DOI":"10.1609\/aimag.v38i3.2741","volume":"38","author":"B Goodman","year":"2017","unstructured":"Goodman, B., & Flaxman, S. (2017). European union regulations on algorithmic decision-making and a \u201cright to explanation.\u201d AI Magazine, 38(3), 50\u201357. https:\/\/doi.org\/10.1609\/aimag.v38i3.2741","journal-title":"AI Magazine"},{"key":"248_CR47","doi-asserted-by":"publisher","first-page":"187","DOI":"10.1080\/0952813X.2017.1409284","volume":"30","author":"S Malmasi","year":"2018","unstructured":"Malmasi, S., & Zampieri, M. (2018). Challenges in discriminating profanity from hate speech. Journal of Experimental & Theoretical Artificial Intelligence, 30, 187\u2013202. https:\/\/doi.org\/10.1080\/0952813X.2017.1409284","journal-title":"Journal of Experimental & Theoretical Artificial Intelligence"},{"key":"248_CR48","doi-asserted-by":"publisher","unstructured":"Schmidt, A., & Wiegand M. (2017). A survey on hate speech detection using natural language processing. In: Proceedings of the Fifth International Workshop on Natural Language Processing for Social Media, pp. 1\u201310. https:\/\/doi.org\/10.18653\/v1\/W17-1101.","DOI":"10.18653\/v1\/W17-1101"},{"key":"248_CR49","unstructured":"Simonyan, K., Vedaldi, A., & Zisserman, A. (2014). Deep inside convolutional networks: Visualising image classification models and saliency maps. In: 2nd international conference on learning representations, ICLR 2014."},{"key":"248_CR50","unstructured":"Devlin J, Chang M, Lee K, Toutanova K. (2019). BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. https:\/\/arXiv.org\/1810.04805."},{"issue":"7714","key":"248_CR51","doi-asserted-by":"publisher","first-page":"324","DOI":"10.1038\/d41586-018-05707-8","volume":"559","author":"J Zou","year":"2018","unstructured":"Zou, J., & Schiebinger, L. (2018). AI can be sexist and racist\u2014it\u2019s time to make it fair. Nature, 559(7714), 324\u2013326. https:\/\/doi.org\/10.1038\/d41586-018-05707-8","journal-title":"Nature"},{"issue":"8","key":"248_CR52","doi-asserted-by":"publisher","first-page":"832","DOI":"10.3390\/electronics8080832","volume":"8","author":"DV Carvalho","year":"2019","unstructured":"Carvalho, D. V., Pereira, E. M., & Cardoso, J. S. (2019). Machine learning interpretability: a survey on methods and metrics. Electronics, 8(8), 832. https:\/\/doi.org\/10.3390\/electronics8080832","journal-title":"Electronics"},{"key":"248_CR53","doi-asserted-by":"crossref","unstructured":"Gilpin, L.H., Bau, D., Yuan, B.Z., Bajwa, A., Specter, M., Kagal, L. (2018). Explaining explanations: An overview of interpretability of machine learning. In: Proceedings of the IEEE 5th International Conference on Data Science and Advanced Analytics (DSAA), Turin, Italy, 1\u20133 October 2018; pp. 80\u201389.","DOI":"10.1109\/DSAA.2018.00018"},{"key":"248_CR54","doi-asserted-by":"publisher","DOI":"10.1007\/s10618-022-00831-6","author":"R Guidotti","year":"2022","unstructured":"Guidotti, R. (2022). Counterfactual explanations and how to find them: Literature review and benchmarking. Data Mining and Knowledge Discovery. https:\/\/doi.org\/10.1007\/s10618-022-00831-6","journal-title":"Data Mining and Knowledge Discovery"},{"issue":"5","key":"248_CR55","doi-asserted-by":"publisher","first-page":"593","DOI":"10.3390\/electronics10050593","volume":"10","author":"J Zhou","year":"2021","unstructured":"Zhou, J., Gandomi, A. H., Chen, F., & Holzinger, A. (2021). Evaluating the quality of machine learning explanations: A survey on methods and metrics. Electronics, 10(5), 593. https:\/\/doi.org\/10.3390\/electronics10050593","journal-title":"Electronics"},{"issue":"13s","key":"248_CR56","doi-asserted-by":"publisher","first-page":"42","DOI":"10.1145\/3583558","volume":"55","author":"M Nauta","year":"2022","unstructured":"Nauta, M., Trienes, J., Pathak, S., Nguyen, E., Peters, M., Schmitt, Y., Schl\u00f6tterer, J., Van Keulen, M., & Seifert, C. (2022). From anecdotal evidence to quantitative evaluation methods: A systematic review on evaluating explainable AI. ACM Computing Surveys., 55(13s), 42. https:\/\/doi.org\/10.1145\/3583558","journal-title":"ACM Computing Surveys."},{"key":"248_CR57","unstructured":"Hsiao, J. H. W., Ngai, H. H. T., Qiu, L., Yang, Y., & Cao, C. C. (2021). Roadmap of designing cognitive metrics for explainable artificial intelligence (XAI). CoRR, abs\/2108.01737."},{"key":"248_CR58","volume-title":"Explainable AI: Interpreting, Explaining and Visualizing Deep Learning, ser. Lecture notes in computer science","author":"M Ancona","year":"2018","unstructured":"Ancona, M., Ceolini, E., Oztireli, C., & Gross, M. (2018). Gradient-based attribution method. Explainable AI: Interpreting, Explaining and Visualizing Deep Learning, ser. Lecture notes in computer science. Springer."},{"key":"248_CR59","doi-asserted-by":"publisher","unstructured":"Kadir, M., Mosavi, A., & Sonntag, D. (2023). Assessing XAI: unveiling evaluation metrics for local explanation, taxonomies, key concepts, and practical applications. https:\/\/doi.org\/10.31224\/2989.","DOI":"10.31224\/2989"},{"key":"248_CR60","unstructured":"Hooker, S., Erhan, D., Kindermans, P.-J., & Kim, B. A. (2019). Benchmark for Interpretability Methods in Deep Neural Networks. In: Proceedings of the 33rd Conference on Neural Information Processing Systems (NeurIPS 2019), Vancouver, BC, Canada, pp. 8\u201314."},{"key":"248_CR61","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1016\/j.dsp.2017.10.011","volume":"73","author":"G Montavon","year":"2018","unstructured":"Montavon, G., Samek, W., & M\u00fcller, K.-R. (2018). Methods for interpreting and understanding deep neural networks. Digital Signal Processing., 73, 1\u201315.","journal-title":"Digital Signal Processing."},{"key":"248_CR62","doi-asserted-by":"publisher","first-page":"26","DOI":"10.3390\/asi6010026","volume":"6","author":"A Oblizanov","year":"2023","unstructured":"Oblizanov, A., Shevskaya, N., Kazak, A., Rudenko, M., & Dorofeeva, A. (2023). Evaluation metrics research for explainable artificial intelligence global methods using synthetic data. Applied System Innovation., 6, 26. https:\/\/doi.org\/10.3390\/asi6010026","journal-title":"Applied System Innovation."},{"key":"248_CR63","doi-asserted-by":"crossref","unstructured":"Pitroda, V., Fouda, M. M., & Fadlullah, Z. M. (2021). An explainable AI model for interpretable lung disease classification. In: Proceedings of the 2021 IEEE International Conference on Internet of Things and Intelligence Systems, 2021, pp. 98\u2013103.","DOI":"10.1109\/IoTaIS53735.2021.9628573"},{"key":"248_CR64","unstructured":"Luotsinen, L.J., Oskarsson, D., Svenmarck, P., & Bolin, U.W. (2019). Explainable artificial intelligence: Exploring XAI techniques in military deep learning applications. FOI-R--4849\u2014SE ISSN 1650\u20131942."},{"key":"248_CR65","unstructured":"Arya, V. et al. (2019). One explanation does not fit all: A toolkit and taxonomy of ai explainability techniques."},{"issue":"01","key":"248_CR66","doi-asserted-by":"publisher","first-page":"56","DOI":"10.1109\/TVCG.2019.2934619","volume":"26","author":"J Wexler","year":"2020","unstructured":"Wexler, J., et al. (2020). The what-if tool: interactive probing of machine learning models. IEEE Transactions on Visualization & Computer Graphics, 26(01), 56\u201365. https:\/\/doi.org\/10.1109\/TVCG.2019.2934619","journal-title":"IEEE Transactions on Visualization & Computer Graphics"},{"key":"248_CR67","doi-asserted-by":"publisher","unstructured":"Nori, H., Jenkins, S., Koch, P., & Caruana, P. (2019). InterpretML: A unified framework for machine learning interpretability. https:\/\/arXiv.org\/1909.09223v1. https:\/\/doi.org\/10.48550\/arXiv.1909.09223.","DOI":"10.48550\/arXiv.1909.09223"},{"key":"248_CR68","doi-asserted-by":"crossref","unstructured":"Amparore, E.G., Perotti, A., & Bajardi, P. (2021). To trust or not to trust an explanation: using LEAF to evaluate local linear XAI methods. https:\/\/arXiv.org\/2106.00461v1 1 Jun 2021.","DOI":"10.7717\/peerj-cs.479"}],"container-title":["Journal of Computational Social Science"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s42001-024-00248-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s42001-024-00248-9\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s42001-024-00248-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,7,16]],"date-time":"2024-07-16T04:06:13Z","timestamp":1721102773000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s42001-024-00248-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,3,15]]},"references-count":68,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2024,4]]}},"alternative-id":["248"],"URL":"https:\/\/doi.org\/10.1007\/s42001-024-00248-9","relation":{},"ISSN":["2432-2717","2432-2725"],"issn-type":[{"value":"2432-2717","type":"print"},{"value":"2432-2725","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,3,15]]},"assertion":[{"value":"6 September 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 January 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"15 March 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors have no conflicts of interest to declare.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}