{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,13]],"date-time":"2025-10-13T00:55:04Z","timestamp":1760316904010,"version":"build-2065373602"},"publisher-location":"Cham","reference-count":43,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031882166"},{"type":"electronic","value":"9783031882173"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-88217-3_12","type":"book-chapter","created":{"date-parts":[[2025,5,26]],"date-time":"2025-05-26T10:23:25Z","timestamp":1748255005000},"page":"167-180","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Image Fusion Survey: A Novel Taxonomy Integrating Transformer and\u00a0Recent 
Approaches"],"prefix":"10.1007","author":[{"given":"Gwendal","family":"Bernardi","sequence":"first","affiliation":[]},{"given":"David","family":"Strubel","sequence":"additional","affiliation":[]},{"given":"Godefroy","family":"Brisebarre","sequence":"additional","affiliation":[]},{"given":"Jean-Fran\u00e7ois","family":"Garin","sequence":"additional","affiliation":[]},{"given":"Mohsen","family":"Ardabilian","sequence":"additional","affiliation":[]},{"given":"Emmanuel","family":"Dellandr\u00e9a","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,5,27]]},"reference":[{"key":"12_CR1","unstructured":"Balit, E., Chadli, A.: GMFNet: gated multimodal fusion network for visible-thermal semantic segmentation. In: Proceedings of the 16th European Conference on Computer Vision, pp.\u00a01\u20134 (2020)"},{"issue":"2","key":"12_CR2","doi-asserted-by":"crossref","first-page":"423","DOI":"10.1109\/TPAMI.2018.2798607","volume":"41","author":"T Baltru\u0161aitis","year":"2018","unstructured":"Baltru\u0161aitis, T., Ahuja, C., Morency, L.P.: Multimodal machine learning: a survey and taxonomy. IEEE Trans. Pattern Anal. Mach. Intell. 41(2), 423\u2013443 (2018)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"8","key":"12_CR3","doi-asserted-by":"crossref","first-page":"2939","DOI":"10.1007\/s00371-021-02166-7","volume":"38","author":"K Bayoudh","year":"2022","unstructured":"Bayoudh, K., Knani, R., Hamdaoui, F., Mtibaa, A.: A survey on deep multimodal learning for computer vision: advances, trends, applications, and datasets. Vis. Comput. 38(8), 2939\u20132970 (2022)","journal-title":"Vis. Comput."},{"key":"12_CR4","unstructured":"Dosovitskiy, A., et\u00a0al.: An image is worth 16x16 words: transformers for image recognition at scale. 
In: International Conference on Learning Representations (2020)"},{"issue":"11","key":"12_CR5","doi-asserted-by":"crossref","first-page":"139","DOI":"10.1145\/3422622","volume":"63","author":"I Goodfellow","year":"2020","unstructured":"Goodfellow, I., et al.: Generative adversarial networks. Commun. ACM 63(11), 139\u2013144 (2020)","journal-title":"Commun. ACM"},{"key":"12_CR6","doi-asserted-by":"crossref","unstructured":"Ha, Q., Watanabe, K., Karasawa, T., Ushiku, Y., Harada, T.: MFNet: towards real-time semantic segmentation for autonomous vehicles with multi-spectral scenes. In: 2017 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 5108\u20135115 (2017)","DOI":"10.1109\/IROS.2017.8206396"},{"key":"12_CR7","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"12_CR8","first-page":"6840","volume":"33","author":"J Ho","year":"2020","unstructured":"Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. Adv. Neural. Inf. Process. Syst. 33, 6840\u20136851 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"issue":"1","key":"12_CR9","doi-asserted-by":"crossref","first-page":"136","DOI":"10.1038\/s41746-020-00341-z","volume":"3","author":"SC Huang","year":"2020","unstructured":"Huang, S.C., Pareek, A., Seyyedi, S., Banerjee, I., Lungren, M.P.: Fusion of medical imaging and electronic health records using deep learning: a systematic review and implementation guidelines. NPJ Digit. Med. 3(1), 136 (2020)","journal-title":"NPJ Digit. Med."},{"key":"12_CR10","doi-asserted-by":"crossref","unstructured":"Kalamkar, S., et\u00a0al.: Multimodal image fusion: a systematic review. Decis. Anal. 
J., 100327 (2023)","DOI":"10.1016\/j.dajour.2023.100327"},{"key":"12_CR11","doi-asserted-by":"crossref","first-page":"185","DOI":"10.1016\/j.inffus.2022.09.019","volume":"90","author":"S Karim","year":"2023","unstructured":"Karim, S., Tong, G., Li, J., Qadir, A., Farooq, U., Yu, Y.: Current advances and future perspectives of image fusion: a comprehensive review. Inf. Fusion 90, 185\u2013217 (2023)","journal-title":"Inf. Fusion"},{"key":"12_CR12","doi-asserted-by":"crossref","unstructured":"Kim, J., Koh, J., Kim, Y., Choi, J., Hwang, Y., Choi, J.W.: Robust deep multi-modal learning based on gated information fusion network. In: Asian Conference on Computer Vision, pp. 90\u2013106 (2018)","DOI":"10.1007\/978-3-030-20870-7_6"},{"issue":"5","key":"12_CR13","doi-asserted-by":"crossref","first-page":"2614","DOI":"10.1109\/TIP.2018.2887342","volume":"28","author":"H Li","year":"2018","unstructured":"Li, H., Wu, X.J.: DenseFuse: a fusion approach to infrared and visible images. IEEE Trans. Image Process. 28(5), 2614\u20132623 (2018)","journal-title":"IEEE Trans. Image Process."},{"issue":"12","key":"12_CR14","doi-asserted-by":"crossref","first-page":"9645","DOI":"10.1109\/TIM.2020.3005230","volume":"69","author":"H Li","year":"2020","unstructured":"Li, H., Wu, X.J., Durrani, T.: NestFuse: an infrared and visible image fusion architecture based on nest connection and spatial\/channel attention models. IEEE Trans. Instrum. Meas. 69(12), 9645\u20139656 (2020)","journal-title":"IEEE Trans. Instrum. Meas."},{"key":"12_CR15","doi-asserted-by":"crossref","unstructured":"Liu, J., et al.: Target-aware dual adversarial learning and a multi-scenario multi-modality benchmark to fuse infrared and visible for object detection. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
5802\u20135811 (2022)","DOI":"10.1109\/CVPR52688.2022.00571"},{"key":"12_CR16","doi-asserted-by":"crossref","unstructured":"Liu, Q., Pi, J., Gao, P., Yuan, D.: STFNet: self-supervised transformer for infrared and visible image fusion. IEEE Trans. Emerg. Top. Comput. Intell. (2024)","DOI":"10.1109\/TETCI.2024.3352490"},{"issue":"9","key":"12_CR17","doi-asserted-by":"crossref","first-page":"929","DOI":"10.1016\/S0167-8655(01)00047-2","volume":"22","author":"Z Liu","year":"2001","unstructured":"Liu, Z., Tsukada, K., Hanasaki, K., Ho, Y.K., Dai, Y.: Image fusion by using steerable pyramid. Pattern Recogn. Lett. 22(9), 929\u2013939 (2001)","journal-title":"Pattern Recogn. Lett."},{"key":"12_CR18","doi-asserted-by":"crossref","unstructured":"Liu, Z., Geng, K., Cheng, X., Shen, K., Li, A., Cheng, S.: CMAFusion: cross modal attention based end-to-end infrared and visible image fusion network. In: 2023 7th CAA International Conference on Vehicular Control and Intelligence (CVCI), pp.\u00a01\u20136. IEEE (2023)","DOI":"10.1109\/CVCI59596.2023.10397304"},{"key":"12_CR19","doi-asserted-by":"crossref","first-page":"4980","DOI":"10.1109\/TIP.2020.2977573","volume":"29","author":"J Ma","year":"2020","unstructured":"Ma, J., Xu, H., Jiang, J., Mei, X., Zhang, X.P.: DDcGAN: a dual-discriminator conditional generative adversarial network for multi-resolution image fusion. IEEE Trans. Image Process. 29, 4980\u20134995 (2020)","journal-title":"IEEE Trans. Image Process."},{"key":"12_CR20","doi-asserted-by":"crossref","first-page":"11","DOI":"10.1016\/j.inffus.2018.09.004","volume":"48","author":"J Ma","year":"2019","unstructured":"Ma, J., Yu, W., Liang, P., Li, C., Jiang, J.: FusionGAN: a generative adversarial network for infrared and visible image fusion. Inf. Fusion 48, 11\u201326 (2019)","journal-title":"Inf. 
Fusion"},{"key":"12_CR21","first-page":"1","volume":"70","author":"J Ma","year":"2020","unstructured":"Ma, J., Zhang, H., Shao, Z., Liang, P., Xu, H.: GANMcC: a generative adversarial network with multiclassification constraints for infrared and visible image fusion. IEEE Trans. Instrum. Meas. 70, 1\u201314 (2020)","journal-title":"IEEE Trans. Instrum. Meas."},{"key":"12_CR22","doi-asserted-by":"crossref","first-page":"12405","DOI":"10.1007\/s11042-017-4895-3","volume":"77","author":"N Paramanandham","year":"2018","unstructured":"Paramanandham, N., Rajendiran, K.: Multi sensor image fusion for surveillance applications using hybrid image fusion algorithm. Multimedia Tools Appl. 77, 12405\u201312436 (2018)","journal-title":"Multimedia Tools Appl."},{"key":"12_CR23","doi-asserted-by":"publisher","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-Net: convolutional networks for biomedical image segmentation. In: Navab, N., Hornegger, J., Wells, W.M., Frangi, A.F. (eds.) MICCAI 2015. LNCS, vol. 9351, pp. 234\u2013241. Springer, Cham (2015). https:\/\/doi.org\/10.1007\/978-3-319-24574-4_28","DOI":"10.1007\/978-3-319-24574-4_28"},{"issue":"6","key":"12_CR24","doi-asserted-by":"publisher","first-page":"979","DOI":"10.1007\/s11548-021-02391-4","volume":"16","author":"J Song","year":"2021","unstructured":"Song, J., et al.: Multiview multimodal network for breast cancer diagnosis in contrast-enhanced spectral mammography images. Int. J. Comput. Assist. Radiol. Surg. 16(6), 979\u2013988 (2021). https:\/\/doi.org\/10.1007\/s11548-021-02391-4","journal-title":"Int. J. Comput. Assist. Radiol. Surg."},{"issue":"3","key":"12_CR25","doi-asserted-by":"crossref","first-page":"2576","DOI":"10.1109\/LRA.2019.2904733","volume":"4","author":"Y Sun","year":"2019","unstructured":"Sun, Y., Zuo, W., Liu, M.: RTFNet: RGB-thermal fusion network for semantic segmentation of urban scenes. IEEE Robot. Autom. Lett. 4(3), 2576\u20132583 (2019)","journal-title":"IEEE Robot. Autom. 
Lett."},{"key":"12_CR26","doi-asserted-by":"crossref","first-page":"28","DOI":"10.1016\/j.inffus.2021.12.004","volume":"82","author":"L Tang","year":"2022","unstructured":"Tang, L., Yuan, J., Ma, J.: Image fusion in the loop of high-level vision tasks: a semantic-aware real-time infrared and visible image fusion network. Inf. Fusion 82, 28\u201342 (2022)","journal-title":"Inf. Fusion"},{"key":"12_CR27","unstructured":"Vaswani, A., et al.: Attention is all you need. Adv. Neural Inf. Process. Syst. 30 (2017)"},{"key":"12_CR28","doi-asserted-by":"crossref","DOI":"10.1016\/j.sigpro.2022.108637","volume":"200","author":"FG Veshki","year":"2022","unstructured":"Veshki, F.G., Ouzir, N., Vorobyov, S.A., Ollila, E.: Multimodal image fusion via coupled feature learning. Signal Process. 200, 108637 (2022)","journal-title":"Signal Process."},{"key":"12_CR29","doi-asserted-by":"crossref","unstructured":"Vielzeuf, V., Lechervy, A., Pateux, S., Jurie, F.: CentralNet: a multilayer approach for multimodal fusion. In: Proceedings of the European Conference on Computer Vision (ECCV) Workshops (2018)","DOI":"10.1007\/978-3-030-11024-6_44"},{"key":"12_CR30","doi-asserted-by":"crossref","unstructured":"Vs, V., Valanarasu, J.M.J., Oza, P., Patel, V.M.: Image fusion transformer. In: 2022 IEEE International Conference on Image Processing (ICIP), pp. 3566\u20133570 (2022)","DOI":"10.1109\/ICIP46576.2022.9897280"},{"key":"12_CR31","volume":"132","author":"Y Wang","year":"2024","unstructured":"Wang, Y., Pu, J., Miao, D., Zhang, L., Zhang, L., Du, X.: SCGRFuse: an infrared and visible image fusion network based on spatial\/channel attention mechanism and gradient aggregation residual dense blocks. Eng. Appl. Artif. Intell. 132, 107898 (2024)","journal-title":"Eng. Appl. Artif. 
Intell."},{"issue":"4","key":"12_CR32","doi-asserted-by":"crossref","first-page":"600","DOI":"10.1109\/TIP.2003.819861","volume":"13","author":"Z Wang","year":"2004","unstructured":"Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P.: Image quality assessment: from error visibility to structural similarity. IEEE Trans. Image Process. 13(4), 600\u2013612 (2004)","journal-title":"IEEE Trans. Image Process."},{"issue":"1","key":"12_CR33","doi-asserted-by":"crossref","first-page":"502","DOI":"10.1109\/TPAMI.2020.3012548","volume":"44","author":"H Xu","year":"2020","unstructured":"Xu, H., Ma, J., Jiang, J., Guo, X., Ling, H.: U2Fusion: a unified unsupervised image fusion network. IEEE Trans. Pattern Anal. Mach. Intell. 44(1), 502\u2013518 (2020)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"07","key":"12_CR34","first-page":"12484","volume":"34","author":"H Xu","year":"2020","unstructured":"Xu, H., Ma, J., Le, Z., Jiang, J., Guo, X.: FusionDN: a unified densely connected network for image fusion. Proc. AAAI Conf. Artif. Intel. 34(07), 12484\u201312491 (2020)","journal-title":"Proc. AAAI Conf. Artif. Intel."},{"key":"12_CR35","doi-asserted-by":"crossref","first-page":"7203","DOI":"10.1109\/TIP.2020.2999855","volume":"29","author":"H Xu","year":"2020","unstructured":"Xu, H., Ma, J., Zhang, X.P.: MEF-GAN: multi-exposure image fusion via generative adversarial networks. IEEE Trans. Image Process. 29, 7203\u20137216 (2020)","journal-title":"IEEE Trans. Image Process."},{"key":"12_CR36","doi-asserted-by":"crossref","unstructured":"Xu, L., Wang, Z., Wu, B., Lui, S.: MDAN: multi-level dependent attention network for visual emotion analysis. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
9479\u20139488 (2022)","DOI":"10.1109\/CVPR52688.2022.00926"},{"issue":"8","key":"12_CR37","doi-asserted-by":"crossref","first-page":"4684","DOI":"10.3390\/app13084684","volume":"13","author":"X Xu","year":"2023","unstructured":"Xu, X., Shen, Y., Han, S.: Dense-FG: a fusion GAN model by using densely connected blocks to fuse infrared and visible images. Appl. Sci. 13(8), 4684 (2023)","journal-title":"Appl. Sci."},{"key":"12_CR38","doi-asserted-by":"crossref","unstructured":"Yang, L., Guo, B., Ni, W.: Multifocus image fusion algorithm based on contourlet decomposition and region statistics. In: Fourth International Conference on Image and Graphics (ICIG 2007), pp. 707\u2013712 (2007)","DOI":"10.1109\/ICIG.2007.135"},{"key":"12_CR39","doi-asserted-by":"crossref","first-page":"1134","DOI":"10.1109\/TCI.2021.3119954","volume":"7","author":"H Zhang","year":"2021","unstructured":"Zhang, H., Yuan, J., Tian, X., Ma, J.: GAN-FM: infrared and visible image fusion using GAN with full-scale skip connection and dual Markovian discriminators. IEEE Trans. Comput. Imaging 7, 1134\u20131147 (2021)","journal-title":"IEEE Trans. Comput. Imaging"},{"key":"12_CR40","doi-asserted-by":"crossref","first-page":"57","DOI":"10.1016\/j.inffus.2017.05.006","volume":"40","author":"Q Zhang","year":"2018","unstructured":"Zhang, Q., Liu, Y., Blum, R.S., Han, J., Tao, D.: Sparse representation based multi-sensor image fusion for multi-focus and multi-modality images: a review. Inf. Fusion 40, 57\u201375 (2018)","journal-title":"Inf. Fusion"},{"key":"12_CR41","doi-asserted-by":"crossref","first-page":"99","DOI":"10.1016\/j.inffus.2019.07.011","volume":"54","author":"Y Zhang","year":"2020","unstructured":"Zhang, Y., Liu, Y., Sun, P., Yan, H., Zhao, X., Zhang, L.: IFCNN: a general image fusion framework based on convolutional neural network. Inf. Fusion 54, 99\u2013118 (2020)","journal-title":"Inf. 
Fusion"},{"key":"12_CR42","doi-asserted-by":"crossref","unstructured":"Zhao, Z., et al.: DDFM: denoising diffusion model for multi-modality image fusion. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 8082\u20138093 (2023)","DOI":"10.1109\/ICCV51070.2023.00742"},{"key":"12_CR43","doi-asserted-by":"crossref","unstructured":"Zhou, Z., Rahman\u00a0Siddiquee, M.M., Tajbakhsh, N., Liang, J.: UNet++: a nested U-net architecture for medical image segmentation. In: Deep Learning in Medical Image Analysis and Multimodal Learning for Clinical Decision Support: 4th International Workshop, DLMIA 2018, and 8th International Workshop, ML-CDS 2018, Held in Conjunction with MICCAI 2018, Granada, Spain, 20 September 2018, Proceedings 4, pp. 3\u201311 (2018)","DOI":"10.1007\/978-3-030-00889-5_1"}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition. ICPR 2024 International Workshops and Challenges"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-88217-3_12","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,12]],"date-time":"2025-10-12T19:13:39Z","timestamp":1760296419000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-88217-3_12"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9783031882166","9783031882173"],"references-count":43,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-88217-3_12","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"27 May 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICPR","order":1,"name":"conference_acronym","label":"Conference 
Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Pattern Recognition","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Kolkata","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"India","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"1 December 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5 December 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icpr2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/icpr2024.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}