{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,5,24]],"date-time":"2025-05-24T09:10:10Z","timestamp":1748077810130,"version":"3.41.0"},"reference-count":44,"publisher":"Springer Science and Business Media LLC","issue":"17","license":[{"start":{"date-parts":[[2024,7,10]],"date-time":"2024-07-10T00:00:00Z","timestamp":1720569600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,7,10]],"date-time":"2024-07-10T00:00:00Z","timestamp":1720569600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100010031","name":"Postdoctoral Research Foundation of China","doi-asserted-by":"publisher","award":["2019M661076"],"award-info":[{"award-number":["2019M661076"]}],"id":[{"id":"10.13039\/501100010031","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimed Tools Appl"],"DOI":"10.1007\/s11042-024-19813-5","type":"journal-article","created":{"date-parts":[[2024,7,10]],"date-time":"2024-07-10T07:02:31Z","timestamp":1720594951000},"page":"18445-18465","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["RALFusion: a residual attention guided lightweight deep-learning framework for infrared and visible image fusion"],"prefix":"10.1007","volume":"84","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6950-8086","authenticated-orcid":false,"given":"Ting","family":"Liu","sequence":"first","affiliation":[]},{"given":"Yuxin","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Guofeng","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Peiqi","family":"Luo","sequence":"additional","affiliation":[]},{"given":"Yunsheng","family":"Fan","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,7,10]]},"reference":[{"key":"19813_CR1","doi-asserted-by":"publisher","first-page":"153","DOI":"10.1016\/j.inffus.2018.02.004","volume":"45","author":"J Ma","year":"2019","unstructured":"Ma J, Ma Y, Li C (2019) Infrared and visible image fusion methods and applications: a survey[J]. Inform Fusion 45:153\u2013178","journal-title":"Inform Fusion"},{"key":"19813_CR2","doi-asserted-by":"publisher","first-page":"323","DOI":"10.1016\/j.inffus.2021.06.008","volume":"76","author":"H Zhang","year":"2021","unstructured":"Zhang H, Xu H, Tian X et al (2021) Image fusion meets deep learning: a survey and perspective[J]. Inform Fusion 76:323\u2013336","journal-title":"Inform Fusion"},{"key":"19813_CR3","doi-asserted-by":"publisher","first-page":"166","DOI":"10.1016\/j.inffus.2020.05.002","volume":"63","author":"X Zhang","year":"2020","unstructured":"Zhang X, Ye P, Leung H et al (2020) Object fusion tracking based on visible and infrared images: a comprehensive review[J]. Inform Fusion 63:166\u2013187","journal-title":"Inform Fusion"},{"key":"19813_CR4","first-page":"671","volume-title":"The Laplacian pyramid as a compact image code[M]\/\/readings in computer vision","author":"PJ Burt","year":"1987","unstructured":"Burt PJ, Adelson EH (1987) The Laplacian pyramid as a compact image code[M]\/\/readings in computer vision. 
Morgan Kaufmann, pp 671\u2013679"},{"issue":"4","key":"19813_CR5","doi-asserted-by":"publisher","first-page":"245","DOI":"10.1016\/0167-8655(89)90003-2","volume":"9","author":"A Toet","year":"1989","unstructured":"Toet A (1989) Image fusion by a ratio of low-pass pyramid[J]. Pattern Recogn Lett 9(4):245\u2013253","journal-title":"Pattern Recogn Lett"},{"issue":"6","key":"19813_CR6","doi-asserted-by":"publisher","first-page":"123","DOI":"10.1109\/MSP.2005.1550194","volume":"22","author":"IW Selesnick","year":"2005","unstructured":"Selesnick IW, Baraniuk RG, Kingsbury NC (2005) The dual-tree complex wavelet transform[J]. IEEE Signal Process Mag 22(6):123\u2013151","journal-title":"IEEE Signal Process Mag"},{"issue":"21","key":"19813_CR7","doi-asserted-by":"publisher","first-page":"10989","DOI":"10.3390\/app122110989","volume":"12","author":"Y Dong","year":"2022","unstructured":"Dong Y, Chen Z, Li Z et al (2022) A multi-branch multi-scale deep learning image fusion algorithm based on DenseNet[J]. Appl Sci 12(21):10989","journal-title":"Appl Sci"},{"issue":"6","key":"19813_CR8","doi-asserted-by":"publisher","first-page":"1575","DOI":"10.1049\/ipr2.12431","volume":"16","author":"H Wang","year":"2022","unstructured":"Wang H, An W, Li L et al (2022) Infrared and visible image fusion based on multi-channel convolutional neural network[J]. IET Image Process 16(6):1575\u20131584","journal-title":"IET Image Process"},{"key":"19813_CR9","doi-asserted-by":"publisher","first-page":"166","DOI":"10.1016\/j.neucom.2019.06.102","volume":"370","author":"X Jin","year":"2019","unstructured":"Jin X, Xiong Q, Xiong C et al (2019) Single image super-resolution with multi-level feature fusion recursive network[J]. Neurocomputing 370:166\u2013173","journal-title":"Neurocomputing"},{"key":"19813_CR10","doi-asserted-by":"publisher","first-page":"191","DOI":"10.1016\/j.inffus.2016.12.001","volume":"36","author":"Y Liu","year":"2017","unstructured":"Liu Y, Chen X, Peng H et al (2017) Multi-focus image fusion with a deep convolutional neural network[J]. Inform Fusion 36:191\u2013207","journal-title":"Inform Fusion"},{"doi-asserted-by":"crossref","unstructured":"Li H, Wu X J, Kittler J (2018) Infrared and visible image fusion using a deep learning framework[C]. 2018 24th international conference on pattern recognition (ICPR). IEEE, pp 2705\u20132710","key":"19813_CR11","DOI":"10.1109\/ICPR.2018.8546006"},{"key":"19813_CR12","doi-asserted-by":"publisher","first-page":"128","DOI":"10.1016\/j.inffus.2020.11.009","volume":"69","author":"Y Long","year":"2021","unstructured":"Long Y, Jia H, Zhong Y et al (2021) RXDNFuse: a aggregated residual dense network for infrared and visible image fusion[J]. Inform Fusion 69:128\u2013141","journal-title":"Inform Fusion"},{"key":"19813_CR13","doi-asserted-by":"publisher","first-page":"28","DOI":"10.1016\/j.inffus.2021.12.004","volume":"82","author":"L Tang","year":"2022","unstructured":"Tang L, Yuan J, Ma J (2022) Image fusion in the loop of high-level vision tasks: a semantic-aware real-time infrared and visible image fusion network[J]. Inform Fusion 82:28\u201342","journal-title":"Inform Fusion"},{"key":"19813_CR14","doi-asserted-by":"publisher","first-page":"79","DOI":"10.1016\/j.inffus.2022.03.007","volume":"83","author":"L Tang","year":"2022","unstructured":"Tang L, Yuan J, Zhang H et al (2022) PIAFusion: a progressive infrared and visible image fusion network based on illumination aware[J]. 
Inform Fusion 83:79\u201392","journal-title":"Inform Fusion"},{"issue":"7","key":"19813_CR15","doi-asserted-by":"publisher","first-page":"9277","DOI":"10.1007\/s11042-021-11549-w","volume":"81","author":"L Zhang","year":"2022","unstructured":"Zhang L, Li H, Zhu R, Du P (2022) An infrared and visible image fusion algorithm based on resnet-152. Multimed Tools Appl 81(7):9277\u20139287","journal-title":"Multimed Tools Appl"},{"issue":"11","key":"19813_CR16","doi-asserted-by":"publisher","first-page":"139","DOI":"10.1145\/3422622","volume":"63","author":"I Goodfellow","year":"2020","unstructured":"Goodfellow I, Pouget-Abadie J, Mirza M, Xu B, Warde-Farley D, Ozair S, Courville A, Bengio Y (2020) Generative adversarial networks. Commun ACM 63(11):139\u2013144","journal-title":"Commun ACM"},{"key":"19813_CR17","doi-asserted-by":"publisher","first-page":"11","DOI":"10.1016\/j.inffus.2018.09.004","volume":"48","author":"J Ma","year":"2019","unstructured":"Ma J, Yu W, Liang P et al (2019) FusionGAN: A generative adversarial network for infrared and visible image fusion[J]. Information fusion 48:11\u201326","journal-title":"Information fusion"},{"key":"19813_CR18","doi-asserted-by":"publisher","first-page":"7800","DOI":"10.1109\/TMM.2022.3228685","volume":"25","author":"Z Wang","year":"2022","unstructured":"Wang Z, Shao W, Chen Y et al (2022) Infrared and visible image fusion via interactive compensatory attention adversarial learning[J]. IEEE Trans Multim 25:7800\u20137813","journal-title":"IEEE Trans Multim"},{"issue":"5","key":"19813_CR19","doi-asserted-by":"publisher","first-page":"2614","DOI":"10.1109\/TIP.2018.2887342","volume":"28","author":"H Li","year":"2018","unstructured":"Li H, Wu XJ (2018) DenseFuse: a fusion approach to infrared and visible images[J]. IEEE Trans Image Process 28(5):2614\u20132623","journal-title":"IEEE Trans Image Process"},{"issue":"12","key":"19813_CR20","doi-asserted-by":"publisher","first-page":"9645","DOI":"10.1109\/TIM.2020.3005230","volume":"69","author":"H Li","year":"2020","unstructured":"Li H, Wu XJ, Durrani T (2020) NestFuse: an infrared and visible image fusion architecture based on nest connection and spatial\/channel attention models[J]. IEEE Trans Instrum Meas 69(12):9645\u20139656","journal-title":"IEEE Trans Instrum Meas"},{"key":"19813_CR21","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1109\/TIM.2020.3022438","volume":"70","author":"L Jian","year":"2020","unstructured":"Jian L, Yang X, Liu Z et al (2020) SEDRFuse: a symmetric encoder\u2013decoder with residual block network for infrared and visible image fusion[J]. IEEE Trans Instrum Meas 70:1\u201315","journal-title":"IEEE Trans Instrum Meas"},{"key":"19813_CR22","doi-asserted-by":"publisher","first-page":"72","DOI":"10.1016\/j.inffus.2021.02.023","volume":"73","author":"H Li","year":"2021","unstructured":"Li H, Wu XJ, Kittler J (2021) RFN-Nest: an end-to-end residual fusion network for infrared and visible images[J]. Inform Fusion 73:72\u201386","journal-title":"Inform Fusion"},{"doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S et al (2016) Deep residual learning for image recognition[C]. 
2016 IEEE conference on computer vision and pattern recognition (CVPR), pp 770\u2013778","key":"19813_CR23","DOI":"10.1109\/CVPR.2016.90"},{"issue":"8","key":"19813_CR24","doi-asserted-by":"publisher","first-page":"2011","DOI":"10.1109\/TPAMI.2019.2913372","volume":"42","author":"J Hu","year":"2020","unstructured":"Hu J, Shen L, Albanie S, Sun G, Wu E (2020) Squeeze-and-excitation networks[J]. IEEE Trans Pattern Anal Mach Intell 42(8):2011\u20132023","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"unstructured":"Vaswani A, Shazeer N, Parmar N et al (2017) Attention is all you need[C]. 31st Conference on Neural Information Processing Systems (NIPS 2017), pp 1\u201315","key":"19813_CR25"},{"issue":"4","key":"19813_CR26","doi-asserted-by":"publisher","first-page":"834","DOI":"10.1109\/TPAMI.2017.2699184","volume":"40","author":"LC Chen","year":"2017","unstructured":"Chen LC, Papandreou G, Kokkinos I et al (2017) Deeplab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected crfs[J]. IEEE Trans Pattern Anal Mach Intell 40(4):834\u2013848","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"doi-asserted-by":"crossref","unstructured":"Zhao H, Shi J, Qi X, Wang X, Jia J (2017) Pyramid scene parsing network[C]. 2017 IEEE conference on computer vision and pattern recognition (CVPR), pp 6230\u20136239","key":"19813_CR27","DOI":"10.1109\/CVPR.2017.660"},{"doi-asserted-by":"crossref","unstructured":"Liu S, Huang D, Wang Y (2018) Receptive field block net for accurate and fast object detection[C]. \u00a0European Conference on Computer Vision (ECCV 2018) 11215: 404\u2013419\u00a0","key":"19813_CR28","DOI":"10.1007\/978-3-030-01252-6_24"},{"doi-asserted-by":"crossref","unstructured":"Jia X, Zhu C, Li M et al (2021) LLVIP: a visible-infrared paired dataset for low-light vision[C]. 2021 IEEE\/CVF international conference on computer vision, pp 3496\u20133504","key":"19813_CR29","DOI":"10.1109\/ICCVW54120.2021.00389"},{"issue":"1","key":"19813_CR30","doi-asserted-by":"publisher","first-page":"023522","DOI":"10.1117\/1.2945910","volume":"2","author":"JW Roberts","year":"2008","unstructured":"Roberts JW, Van Aardt JA, Ahmed FB (2008) Assessment of image fusion procedures using entropy, image quality, and multispectral classification[J]. J Appl Remote Sens 2(1):023522","journal-title":"J Appl Remote Sens"},{"issue":"4","key":"19813_CR31","doi-asserted-by":"publisher","first-page":"308","DOI":"10.1049\/el:20000267","volume":"36","author":"CS Xydeas","year":"2000","unstructured":"Xydeas CS, Petrovic V (2000) Objective image fusion performance measure[J]. Electron Lett 36(4):308\u2013309","journal-title":"Electron Lett"},{"issue":"12","key":"19813_CR32","doi-asserted-by":"publisher","first-page":"1890","DOI":"10.1016\/j.aeue.2015.09.004","volume":"69","author":"V Aslantas","year":"2015","unstructured":"Aslantas V, Bendes E (2015) A new image quality metric for image fusion: the sum of the correlations of differences[J]. Aeu-Int J Electron Commun 69(12):1890\u20131896","journal-title":"Aeu-Int J Electron Commun"},{"issue":"4","key":"19813_CR33","doi-asserted-by":"publisher","first-page":"600","DOI":"10.1109\/TIP.2003.819861","volume":"13","author":"Z Wang","year":"2004","unstructured":"Wang Z, Bovik AC, Sheikh HR et al (2004) Image quality assessment: from error visibility to structural similarity[J]. 
IEEE Trans Image Process 13(4):600\u2013612","journal-title":"IEEE Trans Image Process"},{"issue":"11","key":"19813_CR34","doi-asserted-by":"publisher","first-page":"3345","DOI":"10.1109\/TIP.2015.2442920","volume":"24","author":"K Ma","year":"2015","unstructured":"Ma K, Zeng K, Wang Z (2015) Perceptual quality assessment for multi-exposure image fusion[J]. IEEE Trans Image Process 24(11):3345\u20133356","journal-title":"IEEE Trans Image Process"},{"doi-asserted-by":"crossref","unstructured":"Woo S, Park J, Lee J, Kweon I (2018) CBAM: convolutional block attention module [C]. European Conference on Computer Vision (ECCV 2018) 11211:3\u201319","key":"19813_CR35","DOI":"10.1007\/978-3-030-01234-2_1"},{"unstructured":"Park J, Woo S, Lee J Y et al (2018) Bam: bottleneck attention module[C]. BMVC 2018, pp 1\u201314","key":"19813_CR36"},{"doi-asserted-by":"crossref","unstructured":"Wang Q, Wu B, Zhu P, Li P, Zuo W, Hu Q\u00a0(2020) ECA-Net: efficient channel attention for deep convolutional neural networks[C]. 2020 IEEE\/CVF conference on computer vision and pattern recognition, pp 11534\u201311542","key":"19813_CR37","DOI":"10.1109\/CVPR42600.2020.01155"},{"doi-asserted-by":"crossref","unstructured":"Hou Q, Zhou D, Feng J (2021) Coordinate attention for efficient mobile network design[C]. 2021 IEEE\/CVF conference on computer vision and pattern recognition, pp 13713\u201313722","key":"19813_CR38","DOI":"10.1109\/CVPR46437.2021.01350"},{"key":"19813_CR39","doi-asserted-by":"publisher","first-page":"1193","DOI":"10.1007\/s11760-013-0556-9","volume":"9","author":"BK Shreyamsha Kumar","year":"2015","unstructured":"Shreyamsha Kumar BK (2015) Image fusion based on pixel significance using cross bilateral filter[J]. SIViP 9:1193\u20131204","journal-title":"SIViP"},{"key":"19813_CR40","doi-asserted-by":"publisher","first-page":"100","DOI":"10.1016\/j.inffus.2016.02.001","volume":"31","author":"J Ma","year":"2016","unstructured":"Ma J, Chen C, Li C et al (2016) Infrared and visible image fusion via gradient transfer and total variation minimization[J]. Inform Fusion 31:100\u2013109","journal-title":"Inform Fusion"},{"issue":"12","key":"19813_CR41","doi-asserted-by":"publisher","first-page":"1882","DOI":"10.1109\/LSP.2016.2618776","volume":"23","author":"Y Liu","year":"2016","unstructured":"Liu Y, Chen X, Ward RK et al (2016) Image fusion with convolutional sparse representation[J]. IEEE Signal Process Lett 23(12):1882\u20131886","journal-title":"IEEE Signal Process Lett"},{"doi-asserted-by":"crossref","unstructured":"Fu Y, Wu XJ (2021) A dual-branch network for infrared and visible image fusion[C]. 2020 25th international conference on pattern recognition (ICPR). IEEE, pp 10675\u201310680","key":"19813_CR42","DOI":"10.1109\/ICPR48806.2021.9412293"},{"key":"19813_CR43","doi-asserted-by":"publisher","first-page":"249","DOI":"10.1016\/j.dib.2017.09.038","volume":"15","author":"T Alexander","year":"2017","unstructured":"Alexander T (2017) The TNO multiband image data collection[J]. Data Brief 15:249\u2013251","journal-title":"Data Brief"},{"doi-asserted-by":"crossref","unstructured":"Xu H, Ma J, Le Z et al (2020) Fusiondn: a unified densely connected network for image fusion[C]. 
Proc AAAI Conf Artif Intell 34(07):12484\u201312491","key":"19813_CR44","DOI":"10.1609\/aaai.v34i07.6936"}],"container-title":["Multimedia Tools and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-024-19813-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11042-024-19813-5\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-024-19813-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,5,24]],"date-time":"2025-05-24T08:40:37Z","timestamp":1748076037000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11042-024-19813-5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,7,10]]},"references-count":44,"journal-issue":{"issue":"17","published-online":{"date-parts":[[2025,5]]}},"alternative-id":["19813"],"URL":"https:\/\/doi.org\/10.1007\/s11042-024-19813-5","relation":{},"ISSN":["1573-7721"],"issn-type":[{"type":"electronic","value":"1573-7721"}],"subject":[],"published":{"date-parts":[[2024,7,10]]},"assertion":[{"value":"8 August 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"6 March 2024","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"2 July 2024","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"10 July 2024","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no conflicts of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflicts of interest"}}]}}
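
For readers consuming this record programmatically, the following is a minimal sketch (not part of the record itself) that fetches the same work from the public Crossref REST API (https://api.crossref.org/works/{DOI}) using the DOI given above and reads a few of the fields present in the JSON ("title", "author", "issued", "reference-count"). It assumes Python 3 and network access, and is an illustration of the record's structure rather than an official Crossref client.

# Minimal sketch: retrieve and inspect the Crossref work record shown above.
import json
import urllib.request

DOI = "10.1007/s11042-024-19813-5"  # DOI taken from the record above

# Crossref returns the same envelope as above: {"status": ..., "message": {...}}
with urllib.request.urlopen(f"https://api.crossref.org/works/{DOI}") as resp:
    work = json.load(resp)["message"]

title = work["title"][0]                          # "title" is a list of strings
authors = [f'{a.get("given", "")} {a.get("family", "")}'.strip()
           for a in work.get("author", [])]       # e.g. "Ting Liu", "Yuxin Zhang", ...
year = work["issued"]["date-parts"][0][0]         # [[2024, 7, 10]] -> 2024
n_refs = work.get("reference-count", 0)           # 44 in this record

print(f"{title} ({year})")
print("Authors:", ", ".join(authors))
print("References:", n_refs)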