{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,9]],"date-time":"2026-05-09T16:35:51Z","timestamp":1778344551164,"version":"3.51.4"},"reference-count":26,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2023,10,25]],"date-time":"2023-10-25T00:00:00Z","timestamp":1698192000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,10,25]],"date-time":"2023-10-25T00:00:00Z","timestamp":1698192000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Data Sci Anal"],"published-print":{"date-parts":[[2024,10]]},"DOI":"10.1007\/s41060-023-00458-w","type":"journal-article","created":{"date-parts":[[2023,10,25]],"date-time":"2023-10-25T13:01:37Z","timestamp":1698238897000},"page":"457-466","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":82,"title":["Enhancing trust and interpretability of complex machine learning models using local interpretable model agnostic shap explanations"],"prefix":"10.1007","volume":"18","author":[{"given":"Sai Ram Aditya","family":"Parisineni","sequence":"first","affiliation":[]},{"given":"Mayukha","family":"Pal","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,10,25]]},"reference":[{"key":"458_CR1","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2020.3027314","volume-title":"A Survey on Explainable Artificial Intelligence (XAI): Toward Medical XAI","author":"E Tjoa","year":"2020","unstructured":"Tjoa, E., Guan, C.: A Survey on Explainable Artificial Intelligence (XAI): Toward Medical XAI. P, IEEE Transactions on Neural Networks and Learning Systems (2020). https:\/\/doi.org\/10.1109\/TNNLS.2020.3027314"},{"key":"458_CR2","doi-asserted-by":"publisher","first-page":"82\u2013115","DOI":"10.1016\/j.inffus.2019.12.012","volume":"58","author":"AB Arrieta","year":"2019","unstructured":"Arrieta, A.B., D\u00edaz-Rodr\u00edguez, N., Del Ser, J., et al.: Explainable Artificial Intelligence (XAI): concepts, taxonomies, opportunities and challenges toward responsible AI. Inform. Fus. 58, 82\u2013115 (2019). https:\/\/doi.org\/10.1016\/j.inffus.2019.12.012","journal-title":"Inform. Fus."},{"key":"458_CR3","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-30391-4_6","author":"S Anjomshoae","year":"2019","unstructured":"Anjomshoae, S., Fr\u00e4mling, K., Najjar, A.: Explanations of black-box model predictions by contextual importance and utility. Explain. Transp. Autonom. Agents Multi-Agent Syst. (2019). https:\/\/doi.org\/10.1007\/978-3-030-30391-4_6","journal-title":"Explain. Transp. Autonom. Agents Multi-Agent Syst."},{"key":"458_CR4","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN48605.2020.9206975.10.48550\/arXiv.1910.09840","author":"M Kohlbrenner","year":"2020","unstructured":"Kohlbrenner, M., Bauer, A., et al.: Towards best practice in explaining neural network decisions with LRP. Neural Netw. (2020). https:\/\/doi.org\/10.1109\/IJCNN48605.2020.9206975.10.48550\/arXiv.1910.09840","journal-title":"Neural Netw."},{"key":"458_CR5","doi-asserted-by":"publisher","first-page":"102520","DOI":"10.1016\/j.jag.2021.102520","volume":"103","author":"I Kakogeorgiou","year":"2021","unstructured":"Kakogeorgiou, I., Karantzalos, K.: Evaluating explainable artificial intelligence methods for multi-label deep learning classification tasks in remote sensing. Int J. Appl. Earth Observ. Geoinform 103, 102520 (2021). https:\/\/doi.org\/10.1016\/j.jag.2021.102520","journal-title":"Int J. Appl. Earth Observ. Geoinform"},{"key":"458_CR6","doi-asserted-by":"publisher","first-page":"115045","DOI":"10.1016\/j.eswa.2021.115045","volume":"179","author":"V La Gatta","year":"2021","unstructured":"La Gatta, V., Moscato, V., Postiglione, M., Sperl\u00ec, G.: CASTLE: Cluster-aided space transformation for local explanations. Expert Syst. Appl. 179, 115045 (2021). https:\/\/doi.org\/10.1016\/j.eswa.2021.115045","journal-title":"Expert Syst. Appl."},{"key":"458_CR7","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2104.12437","author":"A Darius","year":"2021","unstructured":"Darius, A., Romain, H., Vincent, G.: Towards Rigorous Interpretations: a Formalisation of Feature Attribution. Mach. Learn. (2021). https:\/\/doi.org\/10.48550\/arXiv.2104.12437","journal-title":"Mach. Learn."},{"key":"458_CR8","doi-asserted-by":"publisher","unstructured":"Kacper Sokol,\u00a0Alexander Hepburn,\u00a0Raul Santos-Rodriguez,\u00a0Peter Flach (2019). bLIMEy: Surrogate Prediction Explanations Beyond. LIME. https:\/\/doi.org\/10.48550\/arXiv.1910.13016","DOI":"10.48550\/arXiv.1910.13016"},{"key":"458_CR9","doi-asserted-by":"publisher","unstructured":"Scott Lundberg and Su-In Lee.: A unified approach to interpreting model predictions. In Proceedings of the 31st International Conference on Neural Information Processing Systems (NIPS\u201917). Curran Associates Inc., Red Hook, NY, USA, 4768\u20134777 (2017). https:\/\/doi.org\/10.48550\/arXiv.1705.07874","DOI":"10.48550\/arXiv.1705.07874"},{"key":"458_CR10","doi-asserted-by":"publisher","unstructured":"J\u00fcrgen, D.,\u00a0Sabrina, K.: Why model why? Assessing the strengths and limitations of LIME (2020). https:\/\/doi.org\/10.48550\/arXiv.2012.00093","DOI":"10.48550\/arXiv.2012.00093"},{"key":"458_CR11","doi-asserted-by":"publisher","unstructured":"David, W.,\u00a0Limor, G.,\u00a0Ankur, T.,\u00a0Luciano, F.: Local Explanations via Necessity and Sufficiency: Unifying Theory and Practice (2021). https:\/\/doi.org\/10.48550\/arXiv.2103.14651","DOI":"10.48550\/arXiv.2103.14651"},{"key":"458_CR12","doi-asserted-by":"publisher","DOI":"10.7717\/peerj-cs.479","volume":"7","author":"E Amparore","year":"2021","unstructured":"Amparore, E., Perotti, A., Bajardi, P.: To trust or not to trust an explanation: using LEAF to evaluate local linear XAI methods. PeerJ Computer Science. 7, e479 (2021). https:\/\/doi.org\/10.7717\/peerj-cs.479","journal-title":"PeerJ Computer Science."},{"key":"458_CR13","doi-asserted-by":"publisher","DOI":"10.21437\/Odyssey.2020-46","author":"H Bence Mark","year":"2020","unstructured":"Bence Mark, H., Finnian, K., Rob van s, Anil A,: Residual networks for resisting noise analysis of an embeddings-based spoofing countermeasure. Odyssey (2020). https:\/\/doi.org\/10.21437\/Odyssey.2020-46","journal-title":"Odyssey"},{"key":"458_CR14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW53098.2021.00260","author":"S Poppi","year":"2021","unstructured":"Poppi, S., Cornia, M., Baraldi, L., Cucchiara, R.: Revisiting The Evaluation of Class Activation Mapping for Explainability: A Novel Metric and Experimental Analysis. Computer Vision and Pattern Recognition (2021). https:\/\/doi.org\/10.1109\/CVPRW53098.2021.00260","journal-title":"Computer Vision and Pattern Recognition"},{"key":"458_CR15","doi-asserted-by":"publisher","unstructured":"Tulio Riberio, M., Singh, S., and Guestrin, C.: \u201cwhy should i trust you?\u201d: explaining the predictions of any classifier. In: Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and data Mining (KDD\u201916). Association for Computing Machinery, New York, NY, USA, 1135\u20131144 (2016). https:\/\/doi.org\/10.1145\/2939672.2939778","DOI":"10.1145\/2939672.2939778"},{"key":"458_CR16","unstructured":"Marco Tulio, R., Singh, S., Guestrin, c.:\u201cWhy should i trust you?\u201d: Explaining the predictions of any classifier (2016). arXiv:1602.04938"},{"key":"458_CR17","doi-asserted-by":"crossref","unstructured":"Ras, G., van, Gerven, M., Haselage, P.: Explanation methods in deep learning: users, values, concerns and challenges (2018). arXiv:1803.07517","DOI":"10.1007\/978-3-319-98131-4_2"},{"key":"458_CR18","unstructured":"Andrew S. R., Nina, C., Elisa, Z. H., Elena, L. G., Finale D.-V.: Evaluating the Interpretability of generative models by interactive reconstruction (2021). arXiv:2102.01264."},{"key":"458_CR19","doi-asserted-by":"publisher","unstructured":"Fryer, D., Strumke, I., and Hien, N.:Shapley Values for Feature Selection: The Good the Bad and the Axioms.\u00a0IEEE Access,(2021). https:\/\/doi.org\/10.48550\/arXiv.2102.10936","DOI":"10.48550\/arXiv.2102.10936"},{"key":"458_CR20","doi-asserted-by":"publisher","first-page":"103502","DOI":"10.1016\/j.artint.2021.103502","volume":"298","author":"K Aas","year":"2020","unstructured":"Aas, K., Jullum, M., L\u00f8land, A.: Explaining individual predictions when features are dependent: more accurate approximations to Shapley values. Artif. Intell. 298, 103502 (2020). https:\/\/doi.org\/10.1016\/j.artint.2021.103502","journal-title":"Artif. Intell."},{"key":"458_CR21","doi-asserted-by":"publisher","first-page":"59","DOI":"10.1109\/MCI.2021.3129959","volume":"17","author":"A Heuillet","year":"2022","unstructured":"Heuillet, A., Couthouis, F., D\u00edaz-Rodr\u00edguez, N.: Collective eXplainable AI: explaining cooperative strategies and agent contribution in multiagent reinforcement learning with Shapley values. IEEE Comput. Intel. Magaz. 17, 59\u201371 (2022). https:\/\/doi.org\/10.1109\/MCI.2021.3129959","journal-title":"IEEE Comput. Intel. Magaz."},{"key":"458_CR22","doi-asserted-by":"publisher","first-page":"56","DOI":"10.1038\/s42256-019-0138-9","volume":"2","author":"S Lundberg","year":"2020","unstructured":"Lundberg, S., Erion, G., Chen, H., et al.: From local explanations to global understanding with explainable AI for trees. Nat Mach Intell 2, 56\u201367 (2020). https:\/\/doi.org\/10.1038\/s42256-019-0138-9","journal-title":"Nat Mach Intell"},{"key":"458_CR23","doi-asserted-by":"publisher","first-page":"27","DOI":"10.1631\/FITEE.1700808","volume":"19","author":"Quan-shi Zhang Song-chun Zhu","year":"2018","unstructured":"Quan-shi Zhang Song-chun Zhu: Visual interpretability for deep learning: a survey. Front. Inf. Technol. Electron. Eng 19, 27\u201339 (2018). https:\/\/doi.org\/10.1631\/FITEE.1700808","journal-title":"Front. Inf. Technol. Electron. Eng"},{"key":"458_CR24","unstructured":"The Shapley Value explanations: https:\/\/towardsdatascience.com\/the-Shapley-value-for-ml-models-f1100bff78d1"},{"key":"458_CR25","unstructured":"Christoph Molnar: Interpretable Machine Learning: A Guide for Making Black Box Models Explainable (2nd ed.) (2022). christophm.github.io\/interpretable-ml-book\/"},{"key":"458_CR26","unstructured":"Mobile price Classification data from Kaggle: https:\/\/www.kaggle.com\/datasets\/iabhishekofficial\/mobile-price-classification"}],"container-title":["International Journal of Data Science and Analytics"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s41060-023-00458-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s41060-023-00458-w\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s41060-023-00458-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,9]],"date-time":"2024-10-09T02:11:32Z","timestamp":1728439892000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s41060-023-00458-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10,25]]},"references-count":26,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2024,10]]}},"alternative-id":["458"],"URL":"https:\/\/doi.org\/10.1007\/s41060-023-00458-w","relation":{},"ISSN":["2364-415X","2364-4168"],"issn-type":[{"value":"2364-415X","type":"print"},{"value":"2364-4168","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,10,25]]},"assertion":[{"value":"8 April 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"11 September 2023","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"25 October 2023","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper. The data obtained for our analysis are from the available public domain database made for academic research purposes.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}