{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,27]],"date-time":"2026-01-27T01:20:14Z","timestamp":1769476814554,"version":"3.49.0"},"reference-count":46,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2025,11,21]],"date-time":"2025-11-21T00:00:00Z","timestamp":1763683200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,11,21]],"date-time":"2025-11-21T00:00:00Z","timestamp":1763683200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100000780","name":"European Union","doi-asserted-by":"crossref","award":["334906"],"award-info":[{"award-number":["334906"]}],"id":[{"id":"10.13039\/501100000780","id-type":"DOI","asserted-by":"crossref"}]},{"DOI":"10.13039\/501100000780","name":"European Union","doi-asserted-by":"crossref","award":["334906"],"award-info":[{"award-number":["334906"]}],"id":[{"id":"10.13039\/501100000780","id-type":"DOI","asserted-by":"crossref"}]},{"DOI":"10.13039\/501100000780","name":"European Union","doi-asserted-by":"crossref","award":["334906"],"award-info":[{"award-number":["334906"]}],"id":[{"id":"10.13039\/501100000780","id-type":"DOI","asserted-by":"crossref"}]},{"DOI":"10.13039\/501100000780","name":"European Union","doi-asserted-by":"crossref","award":["334906"],"award-info":[{"award-number":["334906"]}],"id":[{"id":"10.13039\/501100000780","id-type":"DOI","asserted-by":"crossref"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Machine Vision and Applications"],"published-print":{"date-parts":[[2026,1]]},"DOI":"10.1007\/s00138-025-01766-w","type":"journal-article","created":{"date-parts":[[2025,11,21]],"date-time":"2025-11-21T14:22:30Z","timestamp":1763734950000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["From explanation to unsupervised segmentation: fusion of multiple explanation maps for vision transformers"],"prefix":"10.1007","volume":"37","author":[{"given":"Eduard","family":"Hogea","sequence":"first","affiliation":[]},{"given":"Darian M.","family":"Onchis","sequence":"additional","affiliation":[]},{"given":"Ana","family":"Coporan","sequence":"additional","affiliation":[]},{"given":"Adina Magda","family":"Florea","sequence":"additional","affiliation":[]},{"given":"Codruta","family":"Istin","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,11,21]]},"reference":[{"key":"1766_CR1","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al.: An image is worth 16x16 words: Transformers for image recognition at scale. Preprint at arXiv:2010.11929 (2020)"},{"key":"1766_CR2","unstructured":"Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., J\u00e9gou, H.: Training data-efficient image transformers & distillation through attention. In: International Conference on Machine Learning, pp. 10347\u201310357. PMLR (2021)"},{"key":"1766_CR3","doi-asserted-by":"crossref","unstructured":"Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin transformer: hierarchical vision transformer using shifted windows. 
In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 10012\u201310022 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"1766_CR4","unstructured":"Lai-Dang, Q.-V.: A survey of vision transformers in autonomous driving: current trends and future directions. Preprint at arXiv:2403.07542 (2024)"},{"key":"1766_CR5","doi-asserted-by":"publisher","first-page":"17493","DOI":"10.1038\/s41598-025-02111-x","volume":"15","author":"OF Hassan","year":"2025","unstructured":"Hassan, O.F., Ibrahim, A.F., Gomaa, A., Makhlouf, M.A., Bahaa Eldin, H., et al.: Real-time driver drowsiness detection using transformer architectures: a novel deep learning approach. Sci. Rep. 15, 17493 (2025). https:\/\/doi.org\/10.1038\/s41598-025-02111-x","journal-title":"Sci. Rep."},{"key":"1766_CR6","unstructured":"Srinivas, S., Fleuret, F.: Full-gradient representation for neural network visualization. Adv. Neural Inf. Process. Syst. 32 (2019)"},{"key":"1766_CR7","doi-asserted-by":"publisher","DOI":"10.1016\/j.fsisyn.2022.100217","volume":"4","author":"SH Silva","year":"2022","unstructured":"Silva, S.H., Bethany, M., Votto, A.M., Scarff, I.H., Beebe, N., Najafirad, P.: Deepfake forensics analysis: an explainable hierarchical ensemble of weakly supervised models. Forensic Sci. Int. Synergy 4, 100217 (2022). https:\/\/doi.org\/10.1016\/j.fsisyn.2022.100217","journal-title":"Forensic Sci. Int. Synergy"},{"key":"1766_CR8","doi-asserted-by":"publisher","DOI":"10.1016\/j.autcon.2024.105497","volume":"165","author":"F Forest","year":"2024","unstructured":"Forest, F., Porta, H., Tuia, D., Fink, O.: From classification to segmentation with explainable AI: a study on crack detection and growth monitoring. Autom. Constr. 165, 105497 (2024)","journal-title":"Autom. Constr."},{"issue":"3","key":"1766_CR9","doi-asserted-by":"publisher","first-page":"247","DOI":"10.1109\/JPROC.2021.3060483","volume":"109","author":"W Samek","year":"2021","unstructured":"Samek, W., Montavon, G., Lapuschkin, S., Anders, C.J., M\u00fcller, K.-R.: Explaining deep neural networks and beyond: a review of methods and applications. Proc. IEEE 109(3), 247\u2013278 (2021)","journal-title":"Proc. IEEE"},{"key":"1766_CR10","doi-asserted-by":"crossref","unstructured":"Kashefi, R., Barekatain, L., Sabokrou, M., Aghaeipoor, F.: Explainability of vision transformers: a comprehensive review and new perspectives. Preprint at arXiv:2311.06786 (2023)","DOI":"10.2139\/ssrn.5055345"},{"key":"1766_CR11","unstructured":"Simonyan, K., Vedaldi, A., Zisserman, A.: Deep inside convolutional networks: visualising image classification models and saliency maps. Preprint at arXiv:1312.6034 (2013)"},{"key":"1766_CR12","doi-asserted-by":"crossref","unstructured":"Zeiler, M.D., Fergus, R.: Visualizing and understanding convolutional networks. In: European Conference on Computer Vision, pp. 818\u2013833. Springer (2014)","DOI":"10.1007\/978-3-319-10590-1_53"},{"key":"1766_CR13","unstructured":"Springenberg, J.T., Dosovitskiy, A., Brox, T., Riedmiller, M.: Striving for simplicity: the all convolutional net. Preprint at arXiv:1412.6806 (2014)"},{"key":"1766_CR14","doi-asserted-by":"crossref","unstructured":"Selvaraju, R.R., Cogswell, M., Das, A., Vedantam, R., Parikh, D., Batra, D.: Grad-CAM: visual explanations from deep networks via gradient-based localization. In: IEEE International Conference on Computer Vision (ICCV), pp. 
618\u2013626 (2017)","DOI":"10.1109\/ICCV.2017.74"},{"key":"1766_CR15","unstructured":"Sundararajan, M., Taly, A., Yan, Q.: Axiomatic attribution for deep networks. In: International Conference on Machine Learning, pp. 3319\u20133328. PMLR (2017)"},{"key":"1766_CR16","unstructured":"Smilkov, D., Thorat, N., Kim, B., Vi\u00e9gas, F., Wattenberg, M.: Smoothgrad: removing noise by adding noise. Preprint at arXiv:1706.03825 (2017)"},{"issue":"7","key":"1766_CR17","doi-asserted-by":"publisher","first-page":"0130140","DOI":"10.1371\/journal.pone.0130140","volume":"10","author":"S Bach","year":"2015","unstructured":"Bach, S., Binder, A., Montavon, G., Klauschen, F., M\u00fcller, K.-R., Samek, W.: On pixel-wise explanations for non-linear classifier decisions by layer-wise relevance propagation. PLoS One 10(7), 0130140 (2015)","journal-title":"PLoS One"},{"key":"1766_CR18","unstructured":"Shrikumar, A., Greenside, P., Kundaje, A.: Learning important features through propagating activation differences. In: International Conference on Machine Learning, pp. 3145\u20133153. PMlR (2017)"},{"key":"1766_CR19","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1016\/j.patcog.2016.11.008","volume":"65","author":"G Montavon","year":"2017","unstructured":"Montavon, G., Lapuschkin, S., Binder, A., Samek, W., M\u00fcller, K.-R.: Explaining nonlinear classification decisions with deep Taylor decomposition. Pattern Recogn. 65, 211\u2013222 (2017)","journal-title":"Pattern Recogn."},{"key":"1766_CR20","doi-asserted-by":"crossref","unstructured":"Abnar, S., Zuidema, W.: Quantifying attention flow in transformers. Preprint at arXiv:2005.00928 (2020)","DOI":"10.18653\/v1\/2020.acl-main.385"},{"key":"1766_CR21","doi-asserted-by":"crossref","unstructured":"Chefer, H., Gur, S., Wolf, L.: Transformer interpretability beyond attention visualization. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 782\u2013791 (2021)","DOI":"10.1109\/CVPR46437.2021.00084"},{"key":"1766_CR22","doi-asserted-by":"crossref","unstructured":"Leem, S., Seo, H.: Attention guided cam: visual explanations of vision transformer guided by self-attention. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, pp. 2956\u20132964 (2024)","DOI":"10.1609\/aaai.v38i4.28077"},{"key":"1766_CR23","doi-asserted-by":"crossref","unstructured":"Mallick, R., Benois-Pineau, J., Zemmari, A.: I saw: a self-attention weighted method for explanation of visual transformers. In: 2022 IEEE International Conference on Image Processing (ICIP), pp. 3271\u20133275. IEEE (2022)","DOI":"10.1109\/ICIP46576.2022.9897347"},{"key":"1766_CR24","unstructured":"Adebayo, J., Gilmer, J., Muelly, M., Goodfellow, I., Hardt, M., Kim, B.: Sanity checks for saliency maps. Adv. Neural Inf. Process. Syst. 31 (2018)"},{"key":"1766_CR25","unstructured":"Jain, S., Wallace, B.C.: Attention is not explanation. Preprint at arXiv:1902.10186 (2019)"},{"key":"1766_CR26","doi-asserted-by":"crossref","unstructured":"Serrano, S., Smith, N.A.: Is attention interpretable? Preprint at arXiv:1906.03731 (2019)","DOI":"10.18653\/v1\/P19-1282"},{"key":"1766_CR27","doi-asserted-by":"crossref","unstructured":"Fung, C., Zeng, E., Bauer, L.: Attributions for ML-based ICS anomaly detection: from theory to practice. 
In: Proceedings of the 31st Network and Distributed System Security Symposium (2024)","DOI":"10.14722\/ndss.2024.23216"},{"key":"1766_CR28","doi-asserted-by":"publisher","first-page":"376","DOI":"10.1007\/s42452-025-06879-5","volume":"7","author":"G Joshi","year":"2025","unstructured":"Joshi, G., Joshi, A., Shetty, M., Walambe, R., Kotecha, K., Scotti, F., Piuri, V.: Ensemble learning and eigencam-based feature analysis for improving the performance and explainability of object detection in drone imagery. Discover Appl. Sci. 7, 376 (2025). https:\/\/doi.org\/10.1007\/s42452-025-06879-5","journal-title":"Discover Appl. Sci."},{"key":"1766_CR29","doi-asserted-by":"publisher","unstructured":"Mendon\u00e7a, T., Ferreira, P.M., Marques, J.S., Marcal, A.R.S., Rozeira, J.: PH\u00b2 - a dermoscopic image database for research and benchmarking. In: 35th International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC), pp. 5437\u20135440. IEEE (2013). https:\/\/doi.org\/10.1109\/EMBC.2013.6610779","DOI":"10.1109\/EMBC.2013.6610779"},{"key":"1766_CR30","doi-asserted-by":"publisher","first-page":"303","DOI":"10.1007\/s11263-009-0275-4","volume":"88","author":"M Everingham","year":"2010","unstructured":"Everingham, M., Gool, L.V., Williams, C.K.I., Winn, J.M., Zisserman, A.: The pascal visual object classes (VOC) challenge. Int. J. Comput. Vis. 88, 303\u2013338 (2010)","journal-title":"Int. J. Comput. Vis."},{"key":"1766_CR31","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","volume":"115","author":"O Russakovsky","year":"2015","unstructured":"Russakovsky, O., Deng, J., Su, H., Krause, J., Satheesh, S., Ma, S., Huang, Z., Karpathy, A., Khosla, A., Bernstein, M.: Imagenet large scale visual recognition challenge. Int. J. Comput. Vision 115, 211\u2013252 (2015)","journal-title":"Int. J. Comput. Vision"},{"key":"1766_CR32","unstructured":"Chen, J., Lu, Y., Yu, Q., Luo, X., Adeli, E., Wang, Y., Lu, L., Yuille, A.L., Zhou, Y.: TransUNet: transformers make strong encoders for medical image segmentation. Preprint at arXiv:2102.04306 (2021)"},{"key":"1766_CR33","doi-asserted-by":"crossref","unstructured":"Wang, W., Chen, C., Ding, M., Yu, H., Zha, S., Li, J.: TransBTS: multimodal brain tumor segmentation using transformer. In: International Conference on Medical Image Computing and Computer-assisted Intervention, pp. 109\u2013119. Springer (2021)","DOI":"10.1007\/978-3-030-87193-2_11"},{"key":"1766_CR34","doi-asserted-by":"publisher","unstructured":"Hatamizadeh, A., Tang, Y., Nath, V., Yang, D., Myronenko, A., Landman, B., Roth, H.R., Xu, D.: UNETR: transformers for 3d medical image segmentation. In: IEEE\/CVF Winter Conference on Applications of Computer Vision (WACV), pp. 1748\u20131758 (2022). https:\/\/doi.org\/10.1109\/WACV51458.2022.00181","DOI":"10.1109\/WACV51458.2022.00181"},{"key":"1766_CR35","doi-asserted-by":"crossref","unstructured":"Hatamizadeh, A., Nath, V., Tang, Y., Yang, D., Roth, H.R., Xu, D.: Swin UNETR: Swin transformers for semantic segmentation of brain tumors in MRI images. In: International MICCAI BrainLesion Workshop. Lecture Notes in Computer Science, pp. 272\u2013284 (2022)","DOI":"10.1007\/978-3-031-08999-2_22"},{"key":"1766_CR36","doi-asserted-by":"crossref","unstructured":"Cao, H., Wang, Y., Chen, J., Jiang, D., Zhang, X., Tian, Q., Wang, M.: Swin-UNet: UNet-like pure transformer for medical image segmentation. In: European Conference on Computer Vision, pp. 205\u2013218. 
Springer(2022)","DOI":"10.1007\/978-3-031-25066-8_9"},{"key":"1766_CR37","doi-asserted-by":"crossref","unstructured":"Kerssies, T., Cavagnero, N., Hermans, A., Norouzi, N., Averta, G., Leibe, B., Dubbelman, G., Geus, D.: Your vit is secretly an image segmentation model. In: Proceedings of the Computer Vision and Pattern Recognition Conference, pp. 25303\u201325313 (2025)","DOI":"10.1109\/CVPR52734.2025.02356"},{"key":"1766_CR38","doi-asserted-by":"crossref","unstructured":"Bakkouri, I., Afdel, K.: MLCA2F: multi-level context attentional feature fusion for COVID-19 lesion segmentation from CT scans. Signal Image Video Process. (2022)","DOI":"10.1007\/s11760-022-02325-w"},{"key":"1766_CR39","doi-asserted-by":"crossref","unstructured":"Bakkouri, I., Afdel, K., Benois-Pineau, J., Initiative, G.C.F.T.A.D.N.: BG-3DM2F: bidirectional gated 3d multi-scale feature fusion for Alzheimer\u2019s disease diagnosis. Multimed. Tools Appl. 81(8), 10743\u201310776 (2022)","DOI":"10.1007\/s11042-022-12242-2"},{"issue":"29","key":"1766_CR40","doi-asserted-by":"publisher","first-page":"20483","DOI":"10.1007\/s11042-019-07988-1","volume":"79","author":"I Bakkouri","year":"2020","unstructured":"Bakkouri, I., Afdel, K.: Computer-aided diagnosis (CAD) system based on multi-layer feature fusion network for skin lesion recognition in dermoscopy images. Multimed. Tools Appl. 79(29), 20483\u201320518 (2020)","journal-title":"Multimed. Tools Appl."},{"issue":"10","key":"1766_CR41","doi-asserted-by":"publisher","first-page":"12939","DOI":"10.1007\/s11042-018-6267-z","volume":"78","author":"I Bakkouri","year":"2019","unstructured":"Bakkouri, I., Afdel, K.: Multi-scale CNN based on region proposals for efficient breast abnormality recognition. Multimed. Tools Appl. 78(10), 12939\u201312960 (2019)","journal-title":"Multimed. Tools Appl."},{"issue":"285\u2013296","key":"1766_CR42","first-page":"23","volume":"11","author":"N Otsu","year":"1975","unstructured":"Otsu, N.: A threshold selection method from gray-level histograms. Automatica 11(285\u2013296), 23\u201327 (1975)","journal-title":"Automatica"},{"key":"1766_CR43","unstructured":"Petsiuk, V., Das, A., Saenko, K.: Rise: randomized input sampling for explanation of black-box models. Preprint at arXiv:1806.07421 (2018)"},{"key":"1766_CR44","doi-asserted-by":"crossref","unstructured":"Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.-Y., Doll\u00e1r, P., Girshick, R.: Segment anything. Preprint at arXiv:2304.02643 (2023)","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"1766_CR45","unstructured":"Group, V.G.: VGG Image Annotator (VIA). https:\/\/www.robots.ox.ac.uk\/vgg\/software\/via\/via_demo.html. Version 2.0.12, Accessed 6 April 2025 (2021)"},{"key":"1766_CR46","unstructured":"Zimmermann, K.H.: Diskrete Mathematik. BoD \u2013 Books on Demand, Norderstedt (2006). 
https:\/\/books.google.ro\/books?id=I-r1ywhuy0YC"}],"container-title":["Machine Vision and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00138-025-01766-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00138-025-01766-w","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00138-025-01766-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,26]],"date-time":"2026-01-26T15:07:44Z","timestamp":1769440064000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00138-025-01766-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,21]]},"references-count":46,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2026,1]]}},"alternative-id":["1766"],"URL":"https:\/\/doi.org\/10.1007\/s00138-025-01766-w","relation":{},"ISSN":["0932-8092","1432-1769"],"issn-type":[{"value":"0932-8092","type":"print"},{"value":"1432-1769","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,11,21]]},"assertion":[{"value":"14 July 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"3 October 2025","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"31 October 2025","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 November 2025","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no Conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"7"}}
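The record above is a Crossref REST API response of message-type "work" for DOI 10.1007/s00138-025-01766-w. A minimal sketch of how such a record can be retrieved and read, assuming the public api.crossref.org works endpoint and the third-party `requests` library; the envelope and field names used below ("status", "message", "title", "reference", "DOI", "unstructured") are the ones visible in the JSON above, everything else is illustrative.

```python
# Minimal sketch: fetch a Crossref work record and read a few fields.
# Assumptions: network access to api.crossref.org and the `requests` package.
import requests

DOI = "10.1007/s00138-025-01766-w"  # DOI taken from the record above


def fetch_crossref_work(doi: str) -> dict:
    """Return the 'message' object of a Crossref work record (structure as above)."""
    resp = requests.get(f"https://api.crossref.org/works/{doi}", timeout=30)
    resp.raise_for_status()
    payload = resp.json()
    # The envelope matches the record above: status "ok", message-type "work".
    if payload.get("status") != "ok" or payload.get("message-type") != "work":
        raise ValueError("unexpected Crossref response envelope")
    return payload["message"]


if __name__ == "__main__":
    work = fetch_crossref_work(DOI)
    print(work["title"][0])                    # article title (Crossref stores titles as a list)
    refs = work.get("reference", [])
    print(len(refs), "references")             # 46 in this record
    for ref in refs[:3]:
        # Each reference carries a "key" plus either a "DOI" or an "unstructured" citation string.
        print(ref["key"], ref.get("DOI") or ref.get("unstructured", "")[:60])
```

Parsing the deposited record this way (rather than scraping the landing page) keeps the metadata, funder, and reference fields exactly as shown in the JSON above.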