{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,13]],"date-time":"2026-01-13T01:27:56Z","timestamp":1768267676350,"version":"3.49.0"},"reference-count":42,"publisher":"Springer Science and Business Media LLC","issue":"10","license":[{"start":{"date-parts":[[2025,1,21]],"date-time":"2025-01-21T00:00:00Z","timestamp":1737417600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,21]],"date-time":"2025-01-21T00:00:00Z","timestamp":1737417600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62173328"],"award-info":[{"award-number":["62173328"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62394330"],"award-info":[{"award-number":["62394330"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Vis Comput"],"published-print":{"date-parts":[[2025,8]]},"DOI":"10.1007\/s00371-025-03803-1","type":"journal-article","created":{"date-parts":[[2025,1,21]],"date-time":"2025-01-21T13:34:00Z","timestamp":1737466440000},"page":"7249-7267","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["Shapley value-based class activation mapping for improved explainability in neural networks"],"prefix":"10.1007","volume":"41","author":[{"given":"Huaiguang","family":"Cai","sequence":"first","affiliation":[]},{"given":"Yang","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Yongqiang","family":"Tang","sequence":"additional","affiliation":[]},{"given":"Zhengya","family":"Sun","sequence":"additional","affiliation":[]},{"given":"Wensheng","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,1,21]]},"reference":[{"key":"3803_CR1","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1186\/s12911-019-1002-x","volume":"20","author":"J Amann","year":"2020","unstructured":"Amann, J., Blasimme, A., Vayena, E., Frey, D., Madai, V.I.: Explainability for artificial intelligence in healthcare: a multidisciplinary perspective. BMC Med. Inf. Decis. Making 20, 1\u20139 (2020)","journal-title":"BMC Med. Inf. Decis. Making"},{"key":"3803_CR2","doi-asserted-by":"publisher","first-page":"10142","DOI":"10.1109\/TITS.2021.3122865","volume":"23","author":"D Omeiza","year":"2022","unstructured":"Omeiza, D., Webb, H., Jirotka, M., Kunze, L.: Explanations in autonomous driving: a survey. IEEE Trans. Intell. Transp. Syst. 23, 10142\u201310162 (2022)","journal-title":"IEEE Trans. Intell. Transp. Syst."},{"key":"3803_CR3","doi-asserted-by":"publisher","first-page":"471","DOI":"10.1016\/j.vrih.2023.06.009","volume":"5","author":"X Tian","year":"2023","unstructured":"Tian, X., Wu, Z., Cao, J., Chen, S., Dong, X.: ILIDViz: an incremental learning-based visual analysis system for network anomaly detection. Virtual Reality & Intell. Hardw. 5, 471\u2013489 (2023)","journal-title":"Virtual Reality & Intell. 
Hardw."},{"key":"3803_CR4","doi-asserted-by":"publisher","first-page":"444","DOI":"10.1016\/j.vrih.2022.08.008","volume":"4","author":"Z Shi","year":"2022","unstructured":"Shi, Z., Li, M., Wang, M., Shen, J., Chen, W., Luo, X.: NPIPVis: a visualization system involving NBA visual analysis and integrated learning model prediction. Virtual Reality & Intell. Hardw. 4, 444\u2013458 (2022)","journal-title":"Virtual Reality & Intell. Hardw."},{"key":"3803_CR5","doi-asserted-by":"publisher","first-page":"e2230","DOI":"10.1002\/cav.2230","volume":"35","author":"D Bellenger","year":"2024","unstructured":"Bellenger, D., Chen, M., Xu, Z.: Facial emotion recognition with a reduced feature set for video game and metaverse avatars. Comput. Animat. Virtual Worlds 35, e2230 (2024)","journal-title":"Comput. Animat. Virtual Worlds"},{"key":"3803_CR6","doi-asserted-by":"publisher","first-page":"e2234","DOI":"10.1002\/cav.2234","volume":"35","author":"Y Zhang","year":"2024","unstructured":"Zhang, Y., Zhang, Y., Chen, L., Yin, B., Sun, Y.: Frontal person image generation based on arbitrary-view human images. Comput. Animat. Virtual Worlds 35, e2234 (2024)","journal-title":"Comput. Animat. Virtual Worlds"},{"key":"3803_CR7","doi-asserted-by":"publisher","first-page":"3216","DOI":"10.1109\/TVCG.2018.2866090","volume":"25","author":"B Sheng","year":"2019","unstructured":"Sheng, B., Li, P., Gao, C., Ma, K.: Deep neural representation guided face sketch synthesis. IEEE Trans. Visualization Comput. Gr. 25, 3216\u20133230 (2019)","journal-title":"IEEE Trans. Visualization Comput. Gr."},{"key":"3803_CR8","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2020.101654","volume":"61","author":"Y Shen","year":"2020","unstructured":"Shen, Y., Sheng, B., Fang, R., Li, H., Dai, L., Stolte, S.E., Qin, J., Jia, W., Shen, D.: Domain-invariant interpretable fundus image quality assessment. Med. Image Anal. 61, 101654 (2020)","journal-title":"Med. Image Anal."},{"key":"3803_CR9","doi-asserted-by":"publisher","first-page":"232","DOI":"10.1016\/j.media.2015.09.002","volume":"26","author":"B Yin","year":"2015","unstructured":"Yin, B., Li, H., Sheng, B., Hou, X., Chen, Y., Wu, W., Li, P., Shen, R., Bao, Y., Jia, W.: Vessel extraction from non-fluorescein fundus images using orientation-aware detector. Med. Image Anal. 26, 232\u2013242 (2015)","journal-title":"Med. Image Anal."},{"key":"3803_CR10","doi-asserted-by":"publisher","first-page":"6627","DOI":"10.1007\/s00371-023-03189-y","volume":"40","author":"G Huang","year":"2024","unstructured":"Huang, G., Wen, Y., Qian, B., Bi, L., Chen, T., Sheng, B.: Attention-based multi-scale feature fusion network for myopia grading using optical coherence tomography images. Visual Comput. 40, 6627\u20136638 (2024)","journal-title":"Visual Comput."},{"key":"3803_CR11","doi-asserted-by":"crossref","unstructured":"Mamarasulov, S., Chen, L., Chen, C., Li, Y., Wang, C.: Data augmentation with attention framework for robust deepfake detection. The Visual Computer, 1\u201320 (2024)","DOI":"10.1007\/s00371-024-03690-y"},{"key":"3803_CR12","doi-asserted-by":"crossref","unstructured":"Zhou, B., Khosla, A., Lapedriza, \u00c0., Oliva, A., Torralba, A.: Learning Deep Features for Discriminative Localization. In: Conference on Computer Vision and Pattern Recognition, pp. 2921\u20132929 (2016)","DOI":"10.1109\/CVPR.2016.319"},{"key":"3803_CR13","unstructured":"Draelos, R.L., Carin, L.: Use HiResCAM instead of Grad-CAM for Faithful Explanations of Convolutional Neural Networks. 
arXiv preprint arXiv:2011.08891 (2020)"},{"key":"3803_CR14","doi-asserted-by":"crossref","unstructured":"Selvaraju, R.R., Cogswell, M., Das, A., Vedantam, R., Parikh, D., Batra, D.: Grad-CAM: Visual Explanations from Deep Networks via Gradient-Based Localization. In: International Joint Conference on Artificial Intelligence, pp. 618\u2013626 (2017)","DOI":"10.1109\/ICCV.2017.74"},{"key":"3803_CR15","doi-asserted-by":"crossref","unstructured":"Wang, H., Wang, Z., Du, M., Yang, F., Zhang, Z., Ding, S., Mardziel, P., Hu, X.: Score-CAM: Score-Weighted Visual Explanations for Convolutional Neural Networks. In: Conference on Computer Vision and Pattern Recognition Workshops, pp. 111\u2013119 (2020)","DOI":"10.1109\/CVPRW50498.2020.00020"},{"key":"3803_CR16","doi-asserted-by":"crossref","unstructured":"Desai, S., Ramaswamy, H.G.: Ablation-CAM: Visual Explanations for Deep Convolutional Network via Gradient-free Localization. In: Winter Conference on Applications of Computer Vision, pp. 972\u2013980 (2020)","DOI":"10.1109\/WACV45572.2020.9093360"},{"key":"3803_CR17","doi-asserted-by":"crossref","unstructured":"Chattopadhay, A., Sarkar, A., Howlader, P., Balasubramanian, V.N.: Grad-CAM++: Improved Visual Explanations for Deep Convolutional Networks. In: Winter Conference on Applications of Computer Vision, pp. 839\u2013847 (2018)","DOI":"10.1109\/WACV.2018.00097"},{"key":"3803_CR18","doi-asserted-by":"crossref","unstructured":"Fu, R., Hu, Q., Dong, X., Guo, Y., Gao, Y., Li, B.: Axiom-based Grad-CAM: Towards Accurate Visualization and Explanation of CNNs. arXiv preprint arXiv:2008.02312 (2020)","DOI":"10.5244\/C.34.146"},{"key":"3803_CR19","doi-asserted-by":"publisher","first-page":"5875","DOI":"10.1109\/TIP.2021.3089943","volume":"30","author":"P Jiang","year":"2021","unstructured":"Jiang, P., Zhang, C., Hou, Q., Cheng, M., Wei, Y.: LayerCAM: Exploring hierarchical class activation maps For localization. IEEE Trans. Image Process. 30, 5875\u20135888 (2021)","journal-title":"IEEE Trans. Image Process."},{"key":"3803_CR20","doi-asserted-by":"crossref","unstructured":"Jung, H., Oh, Y.: Towards Better Explanations of Class Activation Mapping. In: International Conference on Computer Vision, pp. 1316\u20131324 (2021)","DOI":"10.1109\/ICCV48922.2021.00137"},{"key":"3803_CR21","unstructured":"Gildenblat, J., contributors: PyTorch library for CAM methods. GitHub. https:\/\/github.com\/jacobgil\/pytorch-grad-cam [Accessed: (Nov. 11, 2024)] (2021)"},{"key":"3803_CR22","first-page":"307","volume":"2","author":"LS Shapley","year":"1953","unstructured":"Shapley, L.S.: A value for N-person games. Contrib. Theor. Games 2, 307\u2013317 (1953)","journal-title":"Contrib. Theor. Games"},{"key":"3803_CR23","doi-asserted-by":"crossref","unstructured":"Rozemberczki, B., Watson, L., Bayer, P., Yang, H., Kiss, O., Nilsson, S., Sarkar, R.: The Shapley Value in Machine Learning. In: International Joint Conference on Artificial Intelligence, pp. 5572\u20135579 (2022)","DOI":"10.24963\/ijcai.2022\/778"},{"key":"3803_CR24","unstructured":"Lundberg, S.M., Lee, S.: A Unified Approach to Interpreting Model Predictions. In: Advances in Neural Information Processing Systems, pp. 4765\u20134774 (2017)"},{"key":"3803_CR25","unstructured":"Ghorbani, A., Zou, J.Y.: Data Shapley: Equitable Valuation of Data for Machine Learning. In: International Conference on Machine Learning, pp. 2242\u20132251 (2019)"},{"key":"3803_CR26","unstructured":"Wang, J.T., Mittal, P., Song, D., Jia, R.: Data Shapley in One Training Run. 
arXiv preprint arXiv:2406.11011 (2024)"},{"key":"3803_CR27","unstructured":"Minh, A.P.T.: Overview of Class Activation Maps for Visualization Explainability. arXiv preprint arXiv:2309.14304 (2023)"},{"key":"3803_CR28","unstructured":"Fernandez, F.-G.: TorchCAM: class activation explorer. GitHub. https:\/\/github.com\/frgfm\/torch-cam [Accessed: (Nov. 11, 2024)] (2020)"},{"key":"3803_CR29","doi-asserted-by":"crossref","unstructured":"Algaba, E., Fragnelli, V., S\u00e1nchez-Soriano, J.: Handbook of the Shapley Value, (2019)","DOI":"10.1201\/9781351241410"},{"key":"3803_CR30","unstructured":"Mazumder, M., Banbury, C.R., Yao, X., Karlas, B., Rojas, W.G., Diamos, S.F., Diamos, G., He, L., Parrish, A., Kirk, H.R., Quaye, J., Rastogi, C., Kiela, D., Jurado, D., Kanter, D., Mosquera, R., Cukierski, W., Ciro, J., Aroyo, L., Acun, B., Chen, L., Raje, M., Bartolo, M., Eyuboglu, E.S., Ghorbani, A., Goodman, E.D., Howard, A., Inel, O., Kane, T., Kirkpatrick, C.R., Sculley, D., Kuo, T., Mueller, J.W., Thrush, T., Vanschoren, J., Warren, M., Williams, A., Yeung, S., Ardalani, N., Paritosh, P.K., Zhang, C., Zou, J.Y., Wu, C., Coleman, C., Ng, A.Y., Mattson, P., Reddi, V.J.: DataPerf: Benchmarks for Data-Centric AI Development. In: Advances in Neural Information Processing Systems, pp. 5320\u20135347 (2023)"},{"key":"3803_CR31","unstructured":"Dagr\u00e9ou, M., Ablin, P., Vaiter, S., Moreau, T.: How to compute Hessian-vector products? In: ICLR Blogposts 2024 (2024). https:\/\/iclr-blogposts.github.io\/2024\/blog\/bench-hvp\/ [Accessed: (Nov. 11, 2024)]"},{"key":"3803_CR32","doi-asserted-by":"crossref","unstructured":"Lerma, M., Lucas, M.: Pre or Post-Softmax Scores in Gradient-based Attribution Methods, What is Best? In: International Conference on Pattern Recognition Systems, pp. 1\u20134 (2023)","DOI":"10.1109\/ICPRS58416.2023.10179032"},{"key":"3803_CR33","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep Residual Learning for Image Recognition. In: Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"3803_CR34","unstructured":"Wang, J.T., Jia, R.: Data Banzhaf: A Robust Data Valuation Framework for Machine Learning. In: International Conference on Artificial Intelligence and Statistics, pp. 6388\u20136421 (2023)"},{"key":"3803_CR35","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","volume":"115","author":"O Russakovsky","year":"2015","unstructured":"Russakovsky, O., Deng, J., Su, H., Krause, J., Satheesh, S., Ma, S., Huang, Z., Karpathy, A., Khosla, A., Bernstein, M.S., Berg, A.C., Fei-Fei, L.: ImageNet large scale visual recognition challenge. Int. J. Comput. Vision 115, 211\u2013252 (2015)","journal-title":"Int. J. Comput. Vision"},{"key":"3803_CR36","doi-asserted-by":"crossref","unstructured":"Xie, S., Girshick, R.B., Doll\u00e1r, P., Tu, Z., He, K.: Aggregated Residual Transformations for Deep Neural Networks. In: Conference on Computer Vision and Pattern Recognition, pp. 5987\u20135995 (2017)","DOI":"10.1109\/CVPR.2017.634"},{"key":"3803_CR37","unstructured":"Simonyan, K., Zisserman, A.: Very Deep Convolutional Networks for Large-Scale Image Recognition. In: International Conference on Learning Representations (2015)"},{"key":"3803_CR38","unstructured":"Tan, M., Le, Q.V.: EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks. In: International Conference on Machine Learning, pp. 
6105\u20136114 (2019)"},{"key":"3803_CR39","doi-asserted-by":"crossref","unstructured":"Sandler, M., Howard, A., Zhu, M., Zhmoginov, A., Chen, L.-C.: MobileNetV2: Inverted Residuals and Linear Bottlenecks. In: Conference on Computer Vision and Pattern Recognition, pp. 4510\u20134520 (2018)","DOI":"10.1109\/CVPR.2018.00474"},{"key":"3803_CR40","doi-asserted-by":"crossref","unstructured":"Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin Transformer: Hierarchical Vision Transformer using Shifted Windows. In: International Conference on Computer Vision, pp. 9992\u201310002 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"3803_CR41","unstructured":"Wightman, R.: PyTorch Image Models. GitHub. https:\/\/github.com\/rwightman\/pytorch-image-models [Accessed: (Nov. 11, 2024)] (2019)"},{"key":"3803_CR42","doi-asserted-by":"crossref","unstructured":"Poppi, S., Cornia, M., Baraldi, L., Cucchiara, R.: Revisiting the Evaluation of Class Activation Mapping for Explainability: A Novel Metric and Experimental Analysis. In: Conference on Computer Vision and Pattern Recognition Workshops, pp. 2299\u20132304 (2021)","DOI":"10.1109\/CVPRW53098.2021.00260"}],"container-title":["The Visual Computer"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-025-03803-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00371-025-03803-1\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-025-03803-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,6]],"date-time":"2025-09-06T03:40:47Z","timestamp":1757130047000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00371-025-03803-1"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,1,21]]},"references-count":42,"journal-issue":{"issue":"10","published-print":{"date-parts":[[2025,8]]}},"alternative-id":["3803"],"URL":"https:\/\/doi.org\/10.1007\/s00371-025-03803-1","relation":{},"ISSN":["0178-2789","1432-2315"],"issn-type":[{"value":"0178-2789","type":"print"},{"value":"1432-2315","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,1,21]]},"assertion":[{"value":"4 January 2025","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 January 2025","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}