{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,18]],"date-time":"2026-01-18T00:06:05Z","timestamp":1768694765201,"version":"3.49.0"},"reference-count":27,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2024,1,17]],"date-time":"2024-01-17T00:00:00Z","timestamp":1705449600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,17]],"date-time":"2024-01-17T00:00:00Z","timestamp":1705449600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["AI Ethics"],"published-print":{"date-parts":[[2024,11]]},"DOI":"10.1007\/s43681-023-00368-4","type":"journal-article","created":{"date-parts":[[2024,1,17]],"date-time":"2024-01-17T11:02:47Z","timestamp":1705489367000},"page":"1143-1174","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["A-XAI: adversarial machine learning for trustable explainability"],"prefix":"10.1007","volume":"4","author":[{"given":"Nishita","family":"Agrawal","sequence":"first","affiliation":[]},{"given":"Isha","family":"Pendharkar","sequence":"additional","affiliation":[]},{"given":"Jugal","family":"Shroff","sequence":"additional","affiliation":[]},{"given":"Jatin","family":"Raghuvanshi","sequence":"additional","affiliation":[]},{"given":"Akashdip","family":"Neogi","sequence":"additional","affiliation":[]},{"given":"Shruti","family":"Patil","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1745-5231","authenticated-orcid":false,"given":"Rahee","family":"Walambe","sequence":"additional","affiliation":[]},{"given":"Ketan","family":"Kotecha","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,1,17]]},"reference":[{"key":"368_CR1","unstructured":"M. D. F. ltd: Ai in healthcare market: Size, share, growth: 2022\u20132027,\u201d Market Data Forecast. [Online]. https:\/\/www.marketdataforecast.com\/market-reports\/artificial-intelligence-in-healthcare-market. Accessed 10 Nov 2022"},{"key":"368_CR2","doi-asserted-by":"publisher","DOI":"10.1155\/2021\/4907754","author":"Z Kong","year":"2021","unstructured":"Kong, Z., Xue, J., Wang, Y., Huang, L., Niu, Z., Li, F.: A survey on adversarial attack in the age of artificial intelligence. Wirel. Commun. Mob. Compu. (2021). https:\/\/doi.org\/10.1155\/2021\/4907754","journal-title":"Wirel. Commun. Mob. Compu."},{"key":"368_CR3","doi-asserted-by":"publisher","unstructured":"Gragnaniello, D., Marra, F., Poggi, G., Verdoliva, L.: Analysis of adversarial attacks against CNN-based image forgery detectors. In: 2018 26th European Signal Processing Conference (EUSIPCO), Rome, Italy, pp 967\u2013971 (2018), https:\/\/doi.org\/10.23919\/EUSIPCO.2018.8553560.","DOI":"10.23919\/EUSIPCO.2018.8553560"},{"key":"368_CR4","doi-asserted-by":"publisher","unstructured":"Saleh, A., Sukaik, R., Abu-Naser, S.S.: Brain tumor classification using deep learning. In: 2020 International Conference on Assistive and Rehabilitation Technologies (iCareTech), pp. 131\u2013136 (2020), https:\/\/doi.org\/10.1109\/iCareTech49914.2020.00032","DOI":"10.1109\/iCareTech49914.2020.00032"},{"key":"368_CR5","doi-asserted-by":"publisher","first-page":"10","DOI":"10.1109\/RBME.2022.3185292","volume":"15","author":"TA Soomro","year":"2022","unstructured":"Soomro, T.A., et al.: Image segmentation for MR brain tumor detection using machine learning: a review. IEEE Rev. Biomed. Eng. 15, 10 (2022). https:\/\/doi.org\/10.1109\/RBME.2022.3185292","journal-title":"IEEE Rev. Biomed. Eng."},{"issue":"1","key":"368_CR6","doi-asserted-by":"publisher","first-page":"68","DOI":"10.1109\/TTS.2023.3234203","volume":"4","author":"T Dhar","year":"2023","unstructured":"Dhar, T., Dey, N., Borra, S., Sherratt, R.S.: Challenges of Deep Learning in Medical Image Analysis\u2014Improving Explainability and Trust. IEEE Trans. Technol. Soc. 4(1), 68\u201375 (2023)","journal-title":"IEEE Trans. Technol. Soc."},{"key":"368_CR7","doi-asserted-by":"publisher","unstructured":"van der Velden, B.H.M., Kuijf, H.J., Gilhuijs, K.G.A., Viergever, M.A.: Explainable artificial intelligence (XAI) in deep learning-based medical image analysis. Med. Image Anal., 79, 102470, ISSN 1361\u20138415 (2022) https:\/\/doi.org\/10.1016\/j.media.2022.102470. https:\/\/www.sciencedirect.com\/science\/article\/pii\/S1361841522001177","DOI":"10.1016\/j.media.2022.102470"},{"key":"368_CR8","unstructured":"Mahapatra, D.: Cyclic generative adversarial networks with congruent image-report generation for explainable medical image analysis. arXiv preprint arXiv:2211.08424 (2022)"},{"key":"368_CR9","doi-asserted-by":"publisher","unstructured":"Ma, X., Niu, Y., Gu, L., Wang, Y., Zhao, Y., Bailey, J., Lu, F.: Understanding adversarial attacks on deep learning-based medical image analysis systems. Pattern Recognit. 110, 107332, ISSN 0031-3203 (2021) https:\/\/doi.org\/10.1016\/j.patcog.2020.107332. https:\/\/www.sciencedirect.com\/science\/article\/pii\/S0031320320301357","DOI":"10.1016\/j.patcog.2020.107332"},{"key":"368_CR10","doi-asserted-by":"crossref","unstructured":"Selvaganapathy, S., Sadasivam, S., & Raj, N.: SafeXAI: Explainable AI to detect adversarial attacks in electronic medical records. In: Intelligent Data Engineering and Analytics: Proceedings of the 9th International Conference on Frontiers in Intelligent Computing: Theory and Applications (FICTA 2021), pp. 501\u2013509. Singapore: Springer Nature Singapore (2022)","DOI":"10.1007\/978-981-16-6624-7_50"},{"key":"368_CR11","doi-asserted-by":"publisher","first-page":"10","DOI":"10.1080\/17517575.2022.2098537","volume":"15","author":"KA Eldrandaly","year":"2022","unstructured":"Eldrandaly, K.A., Abdel-Basset, M., Ibrahim, M., Abdel-Aziz, N.M.: J. Enterp. Inf. Syst. 15, 10 (2022). https:\/\/doi.org\/10.1080\/17517575.2022.2098537","journal-title":"J. Enterp. Inf. Syst."},{"issue":"01","key":"368_CR12","doi-asserted-by":"publisher","first-page":"3681","DOI":"10.1609\/aaai.v33i01.33013681","volume":"33","author":"A Ghorbani","year":"2019","unstructured":"Ghorbani, A., Abid, A., Zou, J.: Interpretation of neural networks is fragile. AAAI 33(01), 3681\u20133688 (2019)","journal-title":"AAAI"},{"key":"368_CR13","doi-asserted-by":"publisher","unstructured":"Kindermans, P.-J., Hooker, S., Adebayo, J., Alber, M., Sch\u00fctt, K., D\u00e4hne, S., Erhan, D., Kim, B.: The (Un)reliability of Saliency Methods. https:\/\/doi.org\/10.1007\/978-3-030-28954-6_14. (2019)","DOI":"10.1007\/978-3-030-28954-6_14"},{"key":"368_CR14","unstructured":"Heo, J., Joo, S., Moon, T.: Fooling neural network interpretations via adversarial model manipulation. Adv. Neural Inform. Process. Syst. 32 (2019)"},{"key":"368_CR15","unstructured":"Dombrowski, A.-K., et al.: Explanations can be manipulated and geometry is to blame. Adv. Neural Inf. Process. Syst. 32 (2019)"},{"key":"368_CR16","unstructured":"Zhang, X., et al.: Interpretable deep learning under fire. In: 29th {USENIX} Security Symposium ({USENIX} Security 20). (2020)"},{"issue":"11","key":"368_CR17","doi-asserted-by":"publisher","first-page":"508","DOI":"10.1038\/s42256-019-0104-6","volume":"1","author":"W Woods","year":"2019","unstructured":"Woods, W., Chen, J., Teuscher, C.: Adversarial explanations for understanding image classification decisions and improved neural network robustness. Nat. Mach. Intell. 1(11), 508\u2013516 (2019)","journal-title":"Nat. Mach. Intell."},{"key":"368_CR18","unstructured":"Chen, J., et al.: Robust attribution regularization. Adv Neural Inform Process Syst. 32 (2019)"},{"key":"368_CR19","unstructured":"Rieger L., Hansen, L.K.: A simple defense against adversarial attacks on heatmap explanations. arXiv preprint arXiv:2007.06381 (2020)"},{"key":"368_CR20","unstructured":"Goodfellow, I.J., Shlens, J., Szegedy, C.: Explaining and harnessing adversarial arXiv:1412.6572 (2014)"},{"key":"368_CR21","unstructured":"Madry, A., Makelov, A., Schmidt, L., Tsipras, D., Vladu, A.: Towards deep learning models resistant to adversarial attacks. CoRR, abs\/1706.06083. https:\/\/arxiv.org\/abs\/1706.06083 (2017)"},{"key":"368_CR22","doi-asserted-by":"crossref","unstructured":"Moosavi-Dezfooli, S.M., Fawzi, A., Frossard, P.: Deepfool: a simple and accurate method to fool deep neural networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2574\u20132582, (2016)","DOI":"10.1109\/CVPR.2016.282"},{"key":"368_CR23","doi-asserted-by":"publisher","first-page":"210","DOI":"10.1007\/978-1-349-20181-5_25","volume-title":"Game Theory","author":"S Hart","year":"1989","unstructured":"Hart, S.: Shapley value. In: Eatwell, J., Milgate, M., Newman, P. (eds.) Game Theory, pp. 210\u2013216. Palgrave Macmillan UK (1989)"},{"key":"368_CR24","unstructured":"Sundararajan, M., Najmi, A.: The many Shapley values for model explanation. In International conference on machine learning, pp. 9269\u20139278. In: PMLR (2020)"},{"key":"368_CR25","doi-asserted-by":"crossref","unstructured":"Selvaraju, R.R., Cogswell, M., Das, A., Vedantam, R., Parikh, D., Batra, D.:. Grad-cam: Visual explanations from deep networks via gradient-based localization. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 618\u2013626. (2017)","DOI":"10.1109\/ICCV.2017.74"},{"key":"368_CR26","doi-asserted-by":"publisher","unstructured":"Nickparvar, M.: Brain Tumor MRI Dataset . Kaggle. https:\/\/doi.org\/10.34740\/KAGGLE\/DSV\/2645886 (2021)","DOI":"10.34740\/KAGGLE\/DSV\/2645886"},{"key":"368_CR27","unstructured":"Nicolae, M. I., Sinn, M., Tran, M. N., Buesser, B., Rawat, A., Wistuba, M., Edwards, B.: Adversarial robustness toolbox v1. 0.0. arXiv preprint arXiv:1807.01069. (2018)"}],"container-title":["AI and Ethics"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s43681-023-00368-4.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s43681-023-00368-4\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s43681-023-00368-4.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,17]],"date-time":"2025-01-17T11:06:37Z","timestamp":1737111997000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s43681-023-00368-4"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,1,17]]},"references-count":27,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2024,11]]}},"alternative-id":["368"],"URL":"https:\/\/doi.org\/10.1007\/s43681-023-00368-4","relation":{},"ISSN":["2730-5953","2730-5961"],"issn-type":[{"value":"2730-5953","type":"print"},{"value":"2730-5961","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,1,17]]},"assertion":[{"value":"6 March 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 October 2023","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"17 January 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}